mirror of
https://github.com/xxjwxc/public.git
synced 2025-09-26 20:01:19 +08:00
add hw oss
This commit is contained in:
208
myhwoss/bucket.go
Normal file
208
myhwoss/bucket.go
Normal file
@@ -0,0 +1,208 @@
|
||||
package myhwoss
|
||||
|
||||
import (
	"bytes"
	"fmt"
	"io"
	"os"
	"time"

	obs "github.com/xxjwxc/public/myhwoss/obs"
)
|
||||
|
||||
// Bucket wraps an OBS client together with the name of one storage bucket,
// and is the receiver for all object-level operations in this package.
type Bucket struct {
	// ObsClient is the underlying Huawei OBS SDK client used for every call.
	ObsClient *obs.ObsClient
	// BucketName is the bucket all methods operate on.
	BucketName string
}
|
||||
|
||||
// BucketInfo describes a storage bucket's metadata as returned by the service.
type BucketInfo struct {
	Name                   string    `xml:"Name"`                     // bucket name
	Location               string    `xml:"Location"`                 // region the bucket resides in
	CreationDate           time.Time `xml:"CreationDate"`             // bucket creation time, in UTC
	ExtranetEndpoint       string    `xml:"ExtranetEndpoint"`         // public (internet) endpoint of the bucket
	IntranetEndpoint       string    `xml:"IntranetEndpoint"`         // internal endpoint for same-region ECS access
	ACL                    string    `xml:"AccessControlList>Grant"`  // container for the bucket's read/write (ACL) grants
	RedundancyType         string    `xml:"DataRedundancyType"`       // data redundancy (disaster-recovery) type
	StorageClass           string    `xml:"StorageClass"`             // storage class of the bucket
	Versioning             string    `xml:"Versioning"`               // versioning state; valid values: Enabled, Suspended
	TransferAcceleration   string    `xml:"TransferAcceleration"`     // transfer acceleration state; valid values: Enabled, Disabled
	CrossRegionReplication string    `xml:"CrossRegionReplication"`   // cross-region replication state; valid values: Enabled, Disabled
}
|
||||
|
||||
// CreateOSSBucket 获取OSS对象存储桶
|
||||
func CreateOSSBucket(endPoint, ak, sk, bucketName string) (*Bucket, error) {
|
||||
// 客户端连接
|
||||
obsClient, err := obs.New(ak, sk, endPoint)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// 判断存储空间是否
|
||||
output, err := obsClient.HeadBucket(bucketName)
|
||||
if err == nil {
|
||||
fmt.Printf("StatusCode:%d, RequestId:%s\n", output.StatusCode, output.RequestId)
|
||||
} else {
|
||||
if _, ok := err.(obs.ObsError); ok {
|
||||
// fmt.Println(obsError.StatusCode)
|
||||
// 创建存储空间,并设置存储类型为标准访问oss.StorageStandard、读写权限ACL为公共读oss.ACLPublicRead、数据容灾类型为本地冗余存储oss.RedundancyLRS
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
input := &obs.CreateBucketInput{}
|
||||
input.Bucket = bucketName
|
||||
// input.StorageClass = obs.StorageClassWarm
|
||||
input.ACL = obs.AclPublicRead
|
||||
_, err := obsClient.CreateBucket(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &Bucket{
|
||||
ObsClient: obsClient,
|
||||
BucketName: bucketName,
|
||||
}, err
|
||||
}
|
||||
|
||||
// GetObjectToFile 下载到本地文件
|
||||
func (b *Bucket) GetObjectToFile(objectKey string, filePath string) error {
|
||||
input := &obs.GetObjectInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = objectKey
|
||||
output, err := b.ObsClient.GetObject(input, obs.WithProgress(&ObsProgressListener{}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Body.Close()
|
||||
|
||||
flag := os.O_CREATE | os.O_WRONLY | os.O_TRUNC
|
||||
f, err := os.OpenFile(filePath, flag, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
p := make([]byte, 10240)
|
||||
var readErr error
|
||||
var readCount int
|
||||
for {
|
||||
readCount, readErr = output.Body.Read(p)
|
||||
if readCount > 0 {
|
||||
f.Write(p[:readCount])
|
||||
}
|
||||
if readErr != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutObjectFromFileName 上传本地文件
|
||||
func (b *Bucket) PutObjectFromFileName(from string, to string) error {
|
||||
input := &obs.PutFileInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = to
|
||||
input.SourceFile = from
|
||||
_, err := b.ObsClient.PutFile(input)
|
||||
return err
|
||||
}
|
||||
|
||||
// PutObjectFromFile 上传文件流
|
||||
func (b *Bucket) PutObjectFromFile(from string, to string) error {
|
||||
return b.PutObjectFromFileName(from, to)
|
||||
}
|
||||
|
||||
// PutObjectFromReader 上传文件流
|
||||
func (b *Bucket) PutObjectFromBytes(from []byte, to string) error {
|
||||
input := &obs.PutObjectInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = to
|
||||
input.Body = bytes.NewReader(from)
|
||||
_, err := b.ObsClient.PutObject(input, obs.WithProgress(&ObsProgressListener{}))
|
||||
return err
|
||||
}
|
||||
|
||||
// IsObjectExist 判断文件是否存在
|
||||
func (b *Bucket) IsObjectExist(filename string) (bool, error) {
|
||||
input := &obs.GetObjectMetadataInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = filename
|
||||
_, err := b.ObsClient.GetObjectMetadata(input)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// DeleteObject 删除文件
|
||||
func (b *Bucket) DeleteObject(filenames []string) ([]string, error) {
|
||||
input := &obs.DeleteObjectsInput{}
|
||||
input.Bucket = b.BucketName
|
||||
for _, v := range filenames {
|
||||
input.Objects = append(input.Objects, obs.ObjectToDelete{Key: v})
|
||||
}
|
||||
output, err := b.ObsClient.DeleteObjects(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var out []string
|
||||
for _, deleted := range output.Deleteds {
|
||||
out = append(out, deleted.Key)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// // ListObjects 列举文件
|
||||
// func (b *Bucket) ListObjects(filenames []string) ([]string, error) {
|
||||
// lsRes, err := b.Bucket.ListObjects(oss.Marker(marker))
|
||||
// return res.DeletedObjects, err
|
||||
// }
|
||||
|
||||
// SetObjectACL 设置文件的访问权限
|
||||
func (b *Bucket) SetObjectACL(objectKey string, _type []obs.Grant) error {
|
||||
input := &obs.SetObjectAclInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = objectKey
|
||||
input.ACL = obs.AclPublicRead
|
||||
input.Grants = _type
|
||||
_, err := b.ObsClient.SetObjectAcl(input)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetObjectACL 获取文件的访问权限
|
||||
func (b *Bucket) GetObjectACL(objectKey string) ([]obs.Grant, error) {
|
||||
input := &obs.GetObjectAclInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = objectKey
|
||||
output, err := b.ObsClient.GetObjectAcl(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return output.Grants, nil
|
||||
}
|
||||
|
||||
// GetObjectMeta 获取文件元信息
|
||||
func (b *Bucket) GetObjectMeta(objectName string) (map[string][]string, error) {
|
||||
input := &obs.GetObjectMetadataInput{}
|
||||
input.Bucket = b.BucketName
|
||||
input.Key = objectName
|
||||
output, err := b.ObsClient.GetObjectMetadata(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mp := make(map[string][]string)
|
||||
mp["ETag"] = append(mp["ETag"], output.ETag)
|
||||
mp["ContentType"] = append(mp["ContentType"], output.ContentType)
|
||||
|
||||
return mp, nil
|
||||
}
|
1
myhwoss/bucket_test.go
Normal file
1
myhwoss/bucket_test.go
Normal file
@@ -0,0 +1 @@
|
||||
package myhwoss
|
1
myhwoss/common.go
Normal file
1
myhwoss/common.go
Normal file
@@ -0,0 +1 @@
|
||||
package myhwoss
|
30
myhwoss/listener.go
Normal file
30
myhwoss/listener.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package myhwoss
|
||||
|
||||
import (
|
||||
obs "github.com/xxjwxc/public/myhwoss/obs"
|
||||
|
||||
"github.com/xxjwxc/public/mylog"
|
||||
)
|
||||
|
||||
// ObsProgressListener is a transfer-progress listener; its ProgressChanged
// method receives OBS progress events and logs them.
type ObsProgressListener struct {
}
|
||||
|
||||
// 定义进度变更事件处理函数。
|
||||
func (listener *ObsProgressListener) ProgressChanged(event *obs.ProgressEvent) {
|
||||
switch event.EventType {
|
||||
case obs.TransferStartedEvent:
|
||||
mylog.Infof("Transfer Started, ConsumedBytes: %d, TotalBytes %d.\n",
|
||||
event.ConsumedBytes, event.TotalBytes)
|
||||
case obs.TransferDataEvent:
|
||||
mylog.Infof("\rTransfer Data, ConsumedBytes: %d, TotalBytes %d, %d%%.\n",
|
||||
event.ConsumedBytes, event.TotalBytes, event.ConsumedBytes*100/event.TotalBytes)
|
||||
case obs.TransferCompletedEvent:
|
||||
mylog.Infof("\nTransfer Completed, ConsumedBytes: %d, TotalBytes %d.\n",
|
||||
event.ConsumedBytes, event.TotalBytes)
|
||||
case obs.TransferFailedEvent:
|
||||
mylog.Infof("\nTransfer Failed, ConsumedBytes: %d, TotalBytes %d.\n",
|
||||
event.ConsumedBytes, event.TotalBytes)
|
||||
default:
|
||||
}
|
||||
}
|
347
myhwoss/obs/auth.go
Normal file
347
myhwoss/obs/auth.go
Normal file
@@ -0,0 +1,347 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// setURLWithPolicy strips the leading "/<bucket>/" (or bare "/<bucket>")
// prefix from a canonicalized URL, leaving only the object portion; a URL
// without either prefix is returned unchanged.
func setURLWithPolicy(bucketName, canonicalizedUrl string) string {
	withSlash := "/" + bucketName + "/"
	bare := "/" + bucketName
	switch {
	case strings.HasPrefix(canonicalizedUrl, withSlash):
		return canonicalizedUrl[len(withSlash):]
	case strings.HasPrefix(canonicalizedUrl, bare):
		return canonicalizedUrl[len(bare):]
	default:
		return canonicalizedUrl
	}
}
|
||||
|
||||
// doAuthTemporary builds a pre-signed (temporary) request URL for the given
// method/bucket/object, valid for `expires` seconds. Depending on the
// configured signature flavor it appends either V4 query-string auth
// parameters or a V2/OBS AccessKeyId+Expires+Signature (or policy-based)
// query string. params and headers are mutated in place. With no ak/sk the
// URL is returned unsigned (logged at WARN).
func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, policy string, params map[string]string,
	headers map[string][]string, expires int64) (requestURL string, err error) {
	sh := obsClient.getSecurity()
	isAkSkEmpty := sh.ak == "" || sh.sk == ""
	if isAkSkEmpty == false && sh.securityToken != "" {
		// Temporary credentials: carry the STS token as a query parameter
		// named for the protocol flavor.
		if obsClient.conf.signature == SignatureObs {
			params[HEADER_STS_TOKEN_OBS] = sh.securityToken
		} else {
			params[HEADER_STS_TOKEN_AMZ] = sh.securityToken
		}
	}

	// Policy-based (POST form) signing does not address a single object.
	if policy != "" {
		objectKey = ""
	}

	requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
	parsedRequestURL, err := url.Parse(requestURL)
	if err != nil {
		return "", err
	}
	encodeHeaders(headers)
	hostName := parsedRequestURL.Host

	isV4 := obsClient.conf.signature == SignatureV4
	prepareHostAndDate(headers, hostName, isV4)

	if isAkSkEmpty {
		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
	} else {
		if isV4 {
			// V4: derive short/long dates from the Date header prepared above.
			date, parseDateErr := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
			if parseDateErr != nil {
				doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
				return "", parseDateErr
			}
			delete(headers, HEADER_DATE_CAMEL)
			shortDate := date.Format(SHORT_DATE_FORMAT)
			longDate := date.Format(LONG_DATE_FORMAT)
			// Default ports must not appear in the signed Host header.
			if len(headers[HEADER_HOST_CAMEL]) != 0 {
				index := strings.LastIndex(headers[HEADER_HOST_CAMEL][0], ":")
				if index != -1 {
					port := headers[HEADER_HOST_CAMEL][0][index+1:]
					if port == "80" || port == "443" {
						headers[HEADER_HOST_CAMEL] = []string{headers[HEADER_HOST_CAMEL][0][:index]}
					}
				}

			}

			signedHeaders, _headers := getSignedHeaders(headers)

			credential, scope := getCredential(sh.ak, obsClient.conf.region, shortDate)
			params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
			params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
			params[PARAM_DATE_AMZ_CAMEL] = longDate
			params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires)
			params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";")

			// Re-format the URL now that the auth query parameters are set.
			requestURL, canonicalizedURL = obsClient.conf.formatUrls(bucketName, objectKey, params, true)
			parsedRequestURL, _err := url.Parse(requestURL)
			if _err != nil {
				return "", _err
			}

			stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers)
			signature := getSignature(stringToSign, sh.sk, obsClient.conf.region, shortDate)

			requestURL += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false))

		} else {
			// V2/OBS: the Date header carries either the absolute expiry
			// timestamp or the base64-encoded policy before signing.
			originDate := headers[HEADER_DATE_CAMEL][0]
			date, parseDateErr := time.Parse(RFC1123_FORMAT, originDate)
			if parseDateErr != nil {
				doLog(LEVEL_WARN, "Failed to parse date with reason: %v", parseDateErr)
				return "", parseDateErr
			}
			expires += date.Unix()
			if policy == "" {
				headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)}
			} else {
				policy = Base64Encode([]byte(policy))
				headers[HEADER_DATE_CAMEL] = []string{policy}
				canonicalizedURL = setURLWithPolicy(bucketName, canonicalizedURL)
			}

			stringToSign := getV2StringToSign(method, canonicalizedURL, headers, obsClient.conf.signature == SignatureObs)
			signature := UrlEncode(Base64Encode(HmacSha1([]byte(sh.sk), []byte(stringToSign))), false)
			if strings.Index(requestURL, "?") < 0 {
				requestURL += "?"
			} else {
				requestURL += "&"
			}
			delete(headers, HEADER_DATE_CAMEL)

			// AWS-compatible signing prefixes the query parameter names
			// with "AWS" (AWSAccessKeyId); OBS signing does not.
			if obsClient.conf.signature != SignatureObs {
				requestURL += "AWS"
			}
			if policy == "" {
				requestURL += fmt.Sprintf("AccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(sh.ak, false),
					expires, signature)
				return

			}
			requestURL += fmt.Sprintf("AccessKeyId=%s&Policy=%s&Signature=%s", UrlEncode(sh.ak, false),
				UrlEncode(policy, false), signature)
		}
	}

	return
}
|
||||
|
||||
// doAuth builds the request URL for bucketName/objectKey and attaches an
// Authorization header computed with the configured signature flavor
// (OBS / V2 / V4). headers is mutated in place: values are URL-encoded,
// Host/Date are normalized, and the STS token header is added when a
// temporary security token is configured. With no ak/sk the request is
// left unsigned (logged at WARN).
func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, hostName string) (requestURL string, err error) {
	sh := obsClient.getSecurity()
	isAkSkEmpty := sh.ak == "" || sh.sk == ""
	if isAkSkEmpty == false && sh.securityToken != "" {
		// Temporary credentials: carry the STS token in the
		// protocol-specific header.
		if obsClient.conf.signature == SignatureObs {
			headers[HEADER_STS_TOKEN_OBS] = []string{sh.securityToken}
		} else {
			headers[HEADER_STS_TOKEN_AMZ] = []string{sh.securityToken}
		}
	}
	isObs := obsClient.conf.signature == SignatureObs
	requestURL, canonicalizedURL := obsClient.conf.formatUrls(bucketName, objectKey, params, true)
	parsedRequestURL, err := url.Parse(requestURL)
	if err != nil {
		return "", err
	}
	encodeHeaders(headers)

	// A caller-supplied host (e.g. custom domain) wins over the one
	// derived from the formatted URL.
	if hostName == "" {
		hostName = parsedRequestURL.Host
	}

	isV4 := obsClient.conf.signature == SignatureV4
	prepareHostAndDate(headers, hostName, isV4)

	if isAkSkEmpty {
		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
	} else {
		ak := sh.ak
		sk := sh.sk
		var authorization string
		if isV4 {
			// V4 signs with an unsigned payload over the canonical URL and query.
			headers[HEADER_CONTENT_SHA256_AMZ] = []string{UNSIGNED_PAYLOAD}
			ret := v4Auth(ak, sk, obsClient.conf.region, method, canonicalizedURL, parsedRequestURL.RawQuery, headers)
			authorization = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
		} else {
			ret := v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
			hashPrefix := V2_HASH_PREFIX
			if isObs {
				hashPrefix = OBS_HASH_PREFIX
			}
			authorization = fmt.Sprintf("%s %s:%s", hashPrefix, ak, ret["Signature"])
		}
		headers[HEADER_AUTH_CAMEL] = []string{authorization}
	}
	return
}
|
||||
|
||||
// prepareHostAndDate forces the canonical Host header and normalizes date
// headers before signing: a usable x-amz-date is mirrored into the Date
// header (converted from the V4 long format when isV4; V2 requires an
// RFC1123-style "... GMT" value), an unusable one is removed, and a Date
// header is synthesized from the current UTC time when none survived.
// headers is mutated in place.
func prepareHostAndDate(headers map[string][]string, hostName string, isV4 bool) {
	headers[HEADER_HOST_CAMEL] = []string{hostName}
	if date, ok := headers[HEADER_DATE_AMZ]; ok {
		// flag records whether the x-amz-date value was usable.
		flag := false
		if len(date) == 1 {
			if isV4 {
				if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil {
					headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)}
					flag = true
				}
			} else {
				// V2 only accepts an RFC1123-style GMT timestamp.
				if strings.HasSuffix(date[0], "GMT") {
					headers[HEADER_DATE_CAMEL] = []string{date[0]}
					flag = true
				}
			}
		}
		if !flag {
			delete(headers, HEADER_DATE_AMZ)
		}
	}
	if _, ok := headers[HEADER_DATE_CAMEL]; !ok {
		headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
	}

}
|
||||
|
||||
func encodeHeaders(headers map[string][]string) {
|
||||
for key, values := range headers {
|
||||
for index, value := range values {
|
||||
values[index] = UrlEncode(value, true)
|
||||
}
|
||||
headers[key] = values
|
||||
}
|
||||
}
|
||||
|
||||
// prepareDateHeader blanks the Date entry in _headers (the normalized
// header map used for signing) whenever a protocol-specific date header
// (x-amz-date / x-obs-date) is also present, so that the protocol header
// takes precedence in the string-to-sign.
// NOTE(review): the two branches are byte-identical except for the key
// they check first, and both write to HEADER_DATE_CAMEL; whether the
// second branch should instead write the lower-case "date" key looks like
// a possible copy-paste issue — confirm against the upstream OBS SDK.
func prepareDateHeader(dataHeader, dateCamelHeader string, headers, _headers map[string][]string) {
	if _, ok := _headers[HEADER_DATE_CAMEL]; ok {
		if _, ok := _headers[dataHeader]; ok {
			_headers[HEADER_DATE_CAMEL] = []string{""}
		} else if _, ok := headers[dateCamelHeader]; ok {
			_headers[HEADER_DATE_CAMEL] = []string{""}
		}
	} else if _, ok := _headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
		if _, ok := _headers[dataHeader]; ok {
			_headers[HEADER_DATE_CAMEL] = []string{""}
		} else if _, ok := headers[dateCamelHeader]; ok {
			_headers[HEADER_DATE_CAMEL] = []string{""}
		}
	}
}
|
||||
|
||||
func getStringToSign(keys []string, isObs bool, _headers map[string][]string) []string {
|
||||
stringToSign := make([]string, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
var value string
|
||||
prefixHeader := HEADER_PREFIX
|
||||
prefixMetaHeader := HEADER_PREFIX_META
|
||||
if isObs {
|
||||
prefixHeader = HEADER_PREFIX_OBS
|
||||
prefixMetaHeader = HEADER_PREFIX_META_OBS
|
||||
}
|
||||
if strings.HasPrefix(key, prefixHeader) {
|
||||
if strings.HasPrefix(key, prefixMetaHeader) {
|
||||
for index, v := range _headers[key] {
|
||||
value += strings.TrimSpace(v)
|
||||
if index != len(_headers[key])-1 {
|
||||
value += ","
|
||||
}
|
||||
}
|
||||
} else {
|
||||
value = strings.Join(_headers[key], ",")
|
||||
}
|
||||
value = fmt.Sprintf("%s:%s", key, value)
|
||||
} else {
|
||||
value = strings.Join(_headers[key], ",")
|
||||
}
|
||||
stringToSign = append(stringToSign, value)
|
||||
}
|
||||
return stringToSign
|
||||
}
|
||||
|
||||
// attachHeaders canonicalizes the request headers for V2/OBS signing and
// returns the joined string-to-sign lines. It keeps only the headers that
// participate in the signature (content-md5, content-type, date, and
// protocol-prefixed headers), lower-cases and trims their keys, pads the
// always-signed "interested" headers with empty values, resolves the
// date-header precedence, and emits the lines in sorted key order.
// Blank-keyed entries are deleted from the caller's map.
func attachHeaders(headers map[string][]string, isObs bool) string {
	length := len(headers)
	_headers := make(map[string][]string, length)
	keys := make([]string, 0, length)

	for key, value := range headers {
		_key := strings.ToLower(strings.TrimSpace(key))
		if _key != "" {
			prefixheader := HEADER_PREFIX
			if isObs {
				prefixheader = HEADER_PREFIX_OBS
			}
			// Only signature-relevant headers are retained.
			if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, prefixheader) {
				keys = append(keys, _key)
				_headers[_key] = value
			}
		} else {
			delete(headers, key)
		}
	}

	// Headers that must always appear in the string-to-sign get empty
	// placeholder values when absent.
	for _, interestedHeader := range interestedHeaders {
		if _, ok := _headers[interestedHeader]; !ok {
			_headers[interestedHeader] = []string{""}
			keys = append(keys, interestedHeader)
		}
	}
	dateCamelHeader := PARAM_DATE_AMZ_CAMEL
	dataHeader := HEADER_DATE_AMZ
	if isObs {
		dateCamelHeader = PARAM_DATE_OBS_CAMEL
		dataHeader = HEADER_DATE_OBS
	}
	prepareDateHeader(dataHeader, dateCamelHeader, headers, _headers)

	sort.Strings(keys)
	stringToSign := getStringToSign(keys, isObs, _headers)
	return strings.Join(stringToSign, "\n")
}
|
||||
|
||||
func getScope(region, shortDate string) string {
|
||||
return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX)
|
||||
}
|
||||
|
||||
func getCredential(ak, region, shortDate string) (string, string) {
|
||||
scope := getScope(region, shortDate)
|
||||
return fmt.Sprintf("%s/%s", ak, scope), scope
|
||||
}
|
||||
|
||||
// getSignedHeaders lower-cases and trims every header key, returning the
// sorted list of signed header names plus the normalized key→values map.
// Entries whose key trims to empty are removed from the caller's map.
func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
	normalized := make(map[string][]string, len(headers))
	names := make([]string, 0, len(headers))
	for key, value := range headers {
		name := strings.ToLower(strings.TrimSpace(key))
		if name == "" {
			// An unusable blank key is dropped from the source map too.
			delete(headers, key)
			continue
		}
		names = append(names, name)
		normalized[name] = value
	}
	sort.Strings(names)
	return names, normalized
}
|
||||
|
||||
func getSignature(stringToSign, sk, region, shortDate string) string {
|
||||
key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
|
||||
key = HmacSha256(key, []byte(region))
|
||||
key = HmacSha256(key, []byte(V4_SERVICE_NAME))
|
||||
key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX))
|
||||
return Hex(HmacSha256(key, []byte(stringToSign)))
|
||||
}
|
55
myhwoss/obs/authV2.go
Normal file
55
myhwoss/obs/authV2.go
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// getV2StringToSign assembles the V2/OBS string-to-sign
// ("METHOD\n<canonical headers>\n<canonical URL>"). For debug logging it
// locates the security token — in the headers, or failing that in the
// canonical URL's query string — and masks it before logging, then
// returns the unmasked string.
func getV2StringToSign(method, canonicalizedURL string, headers map[string][]string, isObs bool) string {
	stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers, isObs), "\n", canonicalizedURL}, "")

	var isSecurityToken bool
	var securityToken []string
	if isObs {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]
	} else {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
	}
	var query []string
	if !isSecurityToken {
		// No token header: scan the query string for a token parameter.
		parmas := strings.Split(canonicalizedURL, "?")
		if len(parmas) > 1 {
			query = strings.Split(parmas[1], "&")
			for _, value := range query {
				if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
					if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
						securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
						isSecurityToken = true
					}
				}
			}
		}
	}
	// Mask the token in the logged copy only.
	logStringToSign := stringToSign
	if isSecurityToken && len(securityToken) > 0 {
		logStringToSign = strings.Replace(logStringToSign, securityToken[0], "******", -1)
	}
	doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", logStringToSign)
	return stringToSign
}
|
||||
|
||||
func v2Auth(ak, sk, method, canonicalizedURL string, headers map[string][]string, isObs bool) map[string]string {
|
||||
stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
|
||||
return map[string]string{"Signature": Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign)))}
|
||||
}
|
136
myhwoss/obs/authV4.go
Normal file
136
myhwoss/obs/authV4.go
Normal file
@@ -0,0 +1,136 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// getV4StringToSign builds the V4 string-to-sign: it assembles the
// canonical request (method, canonical URL, query, signed headers,
// payload hash), logs it with any security token masked, then returns
// "<algorithm>\n<longDate>\n<scope>\n<sha256(canonicalRequest)>".
func getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
	canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
	canonicalRequest = append(canonicalRequest, method)
	canonicalRequest = append(canonicalRequest, "\n")
	canonicalRequest = append(canonicalRequest, canonicalizedURL)
	canonicalRequest = append(canonicalRequest, "\n")
	canonicalRequest = append(canonicalRequest, queryURL)
	canonicalRequest = append(canonicalRequest, "\n")

	// One "name:value\n" line per signed header value, in signed order.
	for _, signedHeader := range signedHeaders {
		values, _ := headers[signedHeader]
		for _, value := range values {
			canonicalRequest = append(canonicalRequest, signedHeader)
			canonicalRequest = append(canonicalRequest, ":")
			canonicalRequest = append(canonicalRequest, value)
			canonicalRequest = append(canonicalRequest, "\n")
		}
	}
	canonicalRequest = append(canonicalRequest, "\n")
	canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";"))
	canonicalRequest = append(canonicalRequest, "\n")
	canonicalRequest = append(canonicalRequest, payload)

	_canonicalRequest := strings.Join(canonicalRequest, "")

	// Find the security token (header first, then query string) so it can
	// be masked in the debug log below.
	var isSecurityToken bool
	var securityToken []string
	if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; !isSecurityToken {
		securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]
	}
	var query []string
	if !isSecurityToken {
		query = strings.Split(queryURL, "&")
		for _, value := range query {
			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
				if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
					securityToken = []string{value[len(HEADER_STS_TOKEN_AMZ)+1:]}
					isSecurityToken = true
				}
			}
		}
	}
	logCanonicalRequest := _canonicalRequest
	if isSecurityToken && len(securityToken) > 0 {
		logCanonicalRequest = strings.Replace(logCanonicalRequest, securityToken[0], "******", -1)
	}
	doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", logCanonicalRequest)

	stringToSign := make([]string, 0, 7)
	stringToSign = append(stringToSign, V4_HASH_PREFIX)
	stringToSign = append(stringToSign, "\n")
	stringToSign = append(stringToSign, longDate)
	stringToSign = append(stringToSign, "\n")
	stringToSign = append(stringToSign, scope)
	stringToSign = append(stringToSign, "\n")
	stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest)))

	_stringToSign := strings.Join(stringToSign, "")

	return _stringToSign
}
|
||||
|
||||
// V4Auth is the exported wrapper for v4Auth; it computes the V4 signature
// components ("Credential", "SignedHeaders", "Signature") for a request.
func V4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
	return v4Auth(ak, sk, region, method, canonicalizedURL, queryURL, headers)
}
|
||||
|
||||
func v4Auth(ak, sk, region, method, canonicalizedURL, queryURL string, headers map[string][]string) map[string]string {
|
||||
var t time.Time
|
||||
if val, ok := headers[HEADER_DATE_AMZ]; ok {
|
||||
var err error
|
||||
t, err = time.Parse(LONG_DATE_FORMAT, val[0])
|
||||
if err != nil {
|
||||
t = time.Now().UTC()
|
||||
}
|
||||
} else if val, ok := headers[PARAM_DATE_AMZ_CAMEL]; ok {
|
||||
var err error
|
||||
t, err = time.Parse(LONG_DATE_FORMAT, val[0])
|
||||
if err != nil {
|
||||
t = time.Now().UTC()
|
||||
}
|
||||
} else if val, ok := headers[HEADER_DATE_CAMEL]; ok {
|
||||
var err error
|
||||
t, err = time.Parse(RFC1123_FORMAT, val[0])
|
||||
if err != nil {
|
||||
t = time.Now().UTC()
|
||||
}
|
||||
} else if val, ok := headers[strings.ToLower(HEADER_DATE_CAMEL)]; ok {
|
||||
var err error
|
||||
t, err = time.Parse(RFC1123_FORMAT, val[0])
|
||||
if err != nil {
|
||||
t = time.Now().UTC()
|
||||
}
|
||||
} else {
|
||||
t = time.Now().UTC()
|
||||
}
|
||||
shortDate := t.Format(SHORT_DATE_FORMAT)
|
||||
longDate := t.Format(LONG_DATE_FORMAT)
|
||||
|
||||
signedHeaders, _headers := getSignedHeaders(headers)
|
||||
|
||||
credential, scope := getCredential(ak, region, shortDate)
|
||||
|
||||
payload := UNSIGNED_PAYLOAD
|
||||
if val, ok := headers[HEADER_CONTENT_SHA256_AMZ]; ok {
|
||||
payload = val[0]
|
||||
}
|
||||
stringToSign := getV4StringToSign(method, canonicalizedURL, queryURL, scope, longDate, payload, signedHeaders, _headers)
|
||||
|
||||
signature := getSignature(stringToSign, sk, region, shortDate)
|
||||
|
||||
ret := make(map[string]string, 3)
|
||||
ret["Credential"] = credential
|
||||
ret["SignedHeaders"] = strings.Join(signedHeaders, ";")
|
||||
ret["Signature"] = signature
|
||||
return ret
|
||||
}
|
49
myhwoss/obs/callback.go
Normal file
49
myhwoss/obs/callback.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ICallbackReadCloser is implemented by response outputs that can carry a
// callback body stream attached after the request completes.
type ICallbackReadCloser interface {
	setCallbackReadCloser(body io.ReadCloser)
}
|
||||
|
||||
// setCallbackReadCloser stores the callback response stream on the
// PutObjectOutput so it can be read via ReadCallbackBody.
func (output *PutObjectOutput) setCallbackReadCloser(body io.ReadCloser) {
	output.CallbackBody.data = body
}
|
||||
|
||||
// setCallbackReadCloser stores the callback response stream on the
// CompleteMultipartUploadOutput so it can be read via ReadCallbackBody.
func (output *CompleteMultipartUploadOutput) setCallbackReadCloser(body io.ReadCloser) {
	output.CallbackBody.data = body
}
|
||||
|
||||
// CallbackBody holds the callback response stream returned by the service;
// it is embedded in outputs that support callbacks and accessed through
// ReadCallbackBody / CloseCallbackBody.
type CallbackBody struct {
	// data is the raw callback stream; nil when no callback was returned.
	data io.ReadCloser
}
|
||||
|
||||
func (output CallbackBody) ReadCallbackBody(p []byte) (int, error) {
|
||||
if output.data == nil {
|
||||
return 0, errors.New("have no callback data")
|
||||
}
|
||||
return output.data.Read(p)
|
||||
}
|
||||
|
||||
func (output CallbackBody) CloseCallbackBody() error {
|
||||
if output.data == nil {
|
||||
return errors.New("have no callback data")
|
||||
}
|
||||
return output.data.Close()
|
||||
}
|
68
myhwoss/obs/client_base.go
Normal file
68
myhwoss/obs/client_base.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ObsClient defines OBS client.
//
// It bundles the resolved configuration with the HTTP client used for all
// requests; construct it with New rather than directly.
type ObsClient struct {
	conf       *config      // resolved endpoint, credentials and transport settings
	httpClient *http.Client // shared client; either caller-supplied or built by New
}
|
||||
|
||||
// New creates a new ObsClient instance.
|
||||
func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) {
|
||||
conf := &config{endpoint: endpoint}
|
||||
conf.securityProviders = make([]securityProvider, 0, 3)
|
||||
conf.securityProviders = append(conf.securityProviders, NewBasicSecurityProvider(ak, sk, ""))
|
||||
|
||||
conf.maxRetryCount = -1
|
||||
conf.maxRedirectCount = -1
|
||||
for _, configurer := range configurers {
|
||||
configurer(conf)
|
||||
}
|
||||
|
||||
if err := conf.initConfigWithDefault(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err := conf.getTransport()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isWarnLogEnabled() {
|
||||
info := make([]string, 3)
|
||||
info[0] = fmt.Sprintf("[OBS SDK Version=%s", OBS_SDK_VERSION)
|
||||
info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint)
|
||||
accessMode := "Virtual Hosting"
|
||||
if conf.pathStyle {
|
||||
accessMode = "Path"
|
||||
}
|
||||
info[2] = fmt.Sprintf("Access Mode=%s]", accessMode)
|
||||
doLog(LEVEL_WARN, strings.Join(info, "];["))
|
||||
}
|
||||
|
||||
if conf.httpClient != nil {
|
||||
doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
|
||||
obsClient := &ObsClient{conf: conf, httpClient: conf.httpClient}
|
||||
return obsClient, nil
|
||||
}
|
||||
|
||||
doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
|
||||
obsClient := &ObsClient{conf: conf, httpClient: &http.Client{Transport: conf.transport, CheckRedirect: checkRedirectFunc}}
|
||||
return obsClient, nil
|
||||
}
|
805
myhwoss/obs/client_bucket.go
Normal file
805
myhwoss/obs/client_bucket.go
Normal file
@@ -0,0 +1,805 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (obsClient ObsClient) DeleteBucketCustomDomain(input *DeleteBucketCustomDomainInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("DeleteBucketCustomDomainInput is nil")
|
||||
}
|
||||
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketCustomDomain", HTTP_DELETE, input.Bucket, newSubResourceSerialV2(SubResourceCustomDomain, input.CustomDomain), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) SetBucketCustomDomain(input *SetBucketCustomDomainInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketCustomDomainInput is nil")
|
||||
}
|
||||
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketCustomDomain", HTTP_PUT, input.Bucket, newSubResourceSerialV2(SubResourceCustomDomain, input.CustomDomain), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) GetBucketCustomDomain(bucketName string, extensions ...extensionOptions) (output *GetBucketCustomDomainOuput, err error) {
|
||||
output = &GetBucketCustomDomainOuput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketCustomDomain", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCustomDomain), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) SetBucketMirrorBackToSource(input *SetBucketMirrorBackToSourceInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketMirrorBackToSource", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) DeleteBucketMirrorBackToSource(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketV2("DeleteBucketMirrorBackToSource", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceMirrorBackToSource), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) GetBucketMirrorBackToSource(bucketName string, extensions ...extensionOptions) (output *GetBucketMirrorBackToSourceOuput, err error) {
|
||||
output = &GetBucketMirrorBackToSourceOuput{}
|
||||
err = obsClient.doActionWithBucketV2("GetBucketMirrorBackToSource", HTTP_GET, bucketName, newSubResourceSerial(SubResourceMirrorBackToSource), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListBuckets lists buckets.
|
||||
//
|
||||
// You can use this API to obtain the bucket list. In the list, bucket names are displayed in lexicographical order.
|
||||
func (obsClient ObsClient) ListBuckets(input *ListBucketsInput, extensions ...extensionOptions) (output *ListBucketsOutput, err error) {
|
||||
if input == nil {
|
||||
input = &ListBucketsInput{}
|
||||
}
|
||||
output = &ListBucketsOutput{}
|
||||
err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CreateBucket creates a bucket.
|
||||
//
|
||||
// You can use this API to create a bucket and name it as you specify. The created bucket name must be unique in OBS.
|
||||
func (obsClient ObsClient) CreateBucket(input *CreateBucketInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("CreateBucketInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket.
|
||||
//
|
||||
// You can use this API to delete a bucket. The bucket to be deleted must be empty
|
||||
// (containing no objects, noncurrent object versions, or part fragments).
|
||||
func (obsClient ObsClient) DeleteBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketStoragePolicy sets bucket storage class.
|
||||
//
|
||||
// You can use this API to set storage class for bucket.
|
||||
func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketStoragePolicyInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
func (obsClient ObsClient) getBucketStoragePolicyS3(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
|
||||
output = &GetBucketStoragePolicyOutput{}
|
||||
var outputS3 *getBucketStoragePolicyOutputS3
|
||||
outputS3 = &getBucketStoragePolicyOutputS3{}
|
||||
err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), outputS3, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
return
|
||||
}
|
||||
output.BaseModel = outputS3.BaseModel
|
||||
output.StorageClass = fmt.Sprintf("%s", outputS3.StorageClass)
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) getBucketStoragePolicyObs(bucketName string, extensions []extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
|
||||
output = &GetBucketStoragePolicyOutput{}
|
||||
var outputObs *getBucketStoragePolicyOutputObs
|
||||
outputObs = &getBucketStoragePolicyOutputObs{}
|
||||
err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageClass), outputObs, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
return
|
||||
}
|
||||
output.BaseModel = outputObs.BaseModel
|
||||
output.StorageClass = outputObs.StorageClass
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketStoragePolicy gets bucket storage class.
|
||||
//
|
||||
// You can use this API to obtain the storage class of a bucket.
|
||||
func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketStoragePolicyOutput, err error) {
|
||||
if obsClient.conf.signature == SignatureObs {
|
||||
return obsClient.getBucketStoragePolicyObs(bucketName, extensions)
|
||||
}
|
||||
return obsClient.getBucketStoragePolicyS3(bucketName, extensions)
|
||||
}
|
||||
|
||||
// SetBucketQuota sets the bucket quota.
|
||||
//
|
||||
// You can use this API to set the bucket quota. A bucket quota must be expressed in bytes and the maximum value is 2^63-1.
|
||||
func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketQuotaInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketQuota gets the bucket quota.
|
||||
//
|
||||
// You can use this API to obtain the bucket quota. Value 0 indicates that no upper limit is set for the bucket quota.
|
||||
func (obsClient ObsClient) GetBucketQuota(bucketName string, extensions ...extensionOptions) (output *GetBucketQuotaOutput, err error) {
|
||||
output = &GetBucketQuotaOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HeadBucket checks whether a bucket exists.
|
||||
//
|
||||
// You can use this API to check whether a bucket exists.
|
||||
func (obsClient ObsClient) HeadBucket(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketMetadata gets the metadata of a bucket.
|
||||
//
|
||||
// You can use this API to send a HEAD request to a bucket to obtain the bucket
|
||||
// metadata such as the storage class and CORS rules (if set).
|
||||
func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput, extensions ...extensionOptions) (output *GetBucketMetadataOutput, err error) {
|
||||
output = &GetBucketMetadataOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetBucketMetadataOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) GetBucketFSStatus(input *GetBucketFSStatusInput, extensions ...extensionOptions) (output *GetBucketFSStatusOutput, err error) {
|
||||
output = &GetBucketFSStatusOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketFSStatus", HTTP_HEAD, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetBucketFSStatusOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketStorageInfo gets storage information about a bucket.
|
||||
//
|
||||
// You can use this API to obtain storage information about a bucket, including the
|
||||
// bucket size and number of objects in the bucket.
|
||||
func (obsClient ObsClient) GetBucketStorageInfo(bucketName string, extensions ...extensionOptions) (output *GetBucketStorageInfoOutput, err error) {
|
||||
output = &GetBucketStorageInfoOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) getBucketLocationS3(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
|
||||
output = &GetBucketLocationOutput{}
|
||||
var outputS3 *getBucketLocationOutputS3
|
||||
outputS3 = &getBucketLocationOutputS3{}
|
||||
err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputS3, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
output.BaseModel = outputS3.BaseModel
|
||||
output.Location = outputS3.Location
|
||||
}
|
||||
return
|
||||
}
|
||||
func (obsClient ObsClient) getBucketLocationObs(bucketName string, extensions []extensionOptions) (output *GetBucketLocationOutput, err error) {
|
||||
output = &GetBucketLocationOutput{}
|
||||
var outputObs *getBucketLocationOutputObs
|
||||
outputObs = &getBucketLocationOutputObs{}
|
||||
err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), outputObs, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
output.BaseModel = outputObs.BaseModel
|
||||
output.Location = outputObs.Location
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketLocation gets the location of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the bucket location.
|
||||
func (obsClient ObsClient) GetBucketLocation(bucketName string, extensions ...extensionOptions) (output *GetBucketLocationOutput, err error) {
|
||||
if obsClient.conf.signature == SignatureObs {
|
||||
return obsClient.getBucketLocationObs(bucketName, extensions)
|
||||
}
|
||||
return obsClient.getBucketLocationS3(bucketName, extensions)
|
||||
}
|
||||
|
||||
// SetBucketAcl sets the bucket ACL.
|
||||
//
|
||||
// You can use this API to set the ACL for a bucket.
|
||||
func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketAclInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
func (obsClient ObsClient) getBucketACLObs(bucketName string, extensions []extensionOptions) (output *GetBucketAclOutput, err error) {
|
||||
output = &GetBucketAclOutput{}
|
||||
var outputObs *getBucketACLOutputObs
|
||||
outputObs = &getBucketACLOutputObs{}
|
||||
err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), outputObs, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
output.BaseModel = outputObs.BaseModel
|
||||
output.Owner = outputObs.Owner
|
||||
output.Grants = make([]Grant, 0, len(outputObs.Grants))
|
||||
for _, valGrant := range outputObs.Grants {
|
||||
tempOutput := Grant{}
|
||||
tempOutput.Delivered = valGrant.Delivered
|
||||
tempOutput.Permission = valGrant.Permission
|
||||
tempOutput.Grantee.DisplayName = valGrant.Grantee.DisplayName
|
||||
tempOutput.Grantee.ID = valGrant.Grantee.ID
|
||||
tempOutput.Grantee.Type = valGrant.Grantee.Type
|
||||
tempOutput.Grantee.URI = GroupAllUsers
|
||||
|
||||
output.Grants = append(output.Grants, tempOutput)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketAcl gets the bucket ACL.
|
||||
//
|
||||
// You can use this API to obtain a bucket ACL.
|
||||
func (obsClient ObsClient) GetBucketAcl(bucketName string, extensions ...extensionOptions) (output *GetBucketAclOutput, err error) {
|
||||
output = &GetBucketAclOutput{}
|
||||
if obsClient.conf.signature == SignatureObs {
|
||||
return obsClient.getBucketACLObs(bucketName, extensions)
|
||||
}
|
||||
err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketPolicy sets the bucket policy.
|
||||
//
|
||||
// You can use this API to set a bucket policy. If the bucket already has a policy, the
|
||||
// policy will be overwritten by the one specified in this request.
|
||||
func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketPolicy is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketPolicy gets the bucket policy.
|
||||
//
|
||||
// You can use this API to obtain the policy of a bucket.
|
||||
func (obsClient ObsClient) GetBucketPolicy(bucketName string, extensions ...extensionOptions) (output *GetBucketPolicyOutput, err error) {
|
||||
output = &GetBucketPolicyOutput{}
|
||||
err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketPolicy deletes the bucket policy.
|
||||
//
|
||||
// You can use this API to delete the policy of a bucket.
|
||||
func (obsClient ObsClient) DeleteBucketPolicy(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketCors sets CORS rules for a bucket.
|
||||
//
|
||||
// You can use this API to set CORS rules for a bucket to allow client browsers to send cross-origin requests.
|
||||
func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketCorsInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketCors gets CORS rules of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the CORS rules of a specified bucket.
|
||||
func (obsClient ObsClient) GetBucketCors(bucketName string, extensions ...extensionOptions) (output *GetBucketCorsOutput, err error) {
|
||||
output = &GetBucketCorsOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketCors deletes CORS rules of a bucket.
|
||||
//
|
||||
// You can use this API to delete the CORS rules of a specified bucket.
|
||||
func (obsClient ObsClient) DeleteBucketCors(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketVersioning sets the versioning status for a bucket.
|
||||
//
|
||||
// You can use this API to set the versioning status for a bucket.
|
||||
func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketVersioningInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketVersioning gets the versioning status of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the versioning status of a bucket.
|
||||
func (obsClient ObsClient) GetBucketVersioning(bucketName string, extensions ...extensionOptions) (output *GetBucketVersioningOutput, err error) {
|
||||
output = &GetBucketVersioningOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketWebsiteConfiguration sets website hosting for a bucket.
|
||||
//
|
||||
// You can use this API to set website hosting for a bucket.
|
||||
func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketWebsiteConfigurationInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketWebsiteConfiguration gets the website hosting settings of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the website hosting settings of a bucket.
|
||||
func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketWebsiteConfigurationOutput, err error) {
|
||||
output = &GetBucketWebsiteConfigurationOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketWebsiteConfiguration deletes the website hosting settings of a bucket.
|
||||
//
|
||||
// You can use this API to delete the website hosting settings of a bucket.
|
||||
func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketLoggingConfiguration sets the bucket logging.
|
||||
//
|
||||
// You can use this API to configure access logging for a bucket.
|
||||
func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketLoggingConfiguration gets the logging settings of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the access logging settings of a bucket.
|
||||
func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLoggingConfigurationOutput, err error) {
|
||||
output = &GetBucketLoggingConfigurationOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketLifecycleConfiguration sets lifecycle rules for a bucket.
|
||||
//
|
||||
// You can use this API to set lifecycle rules for a bucket, to periodically transit
|
||||
// storage classes of objects and delete objects in the bucket.
|
||||
func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketLifecycleConfiguration gets lifecycle rules of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the lifecycle rules of a bucket.
|
||||
func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *GetBucketLifecycleConfigurationOutput, err error) {
|
||||
output = &GetBucketLifecycleConfigurationOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycleConfiguration deletes lifecycle rules of a bucket.
|
||||
//
|
||||
// You can use this API to delete all lifecycle rules of a bucket.
|
||||
func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketEncryption sets the default server-side encryption for a bucket.
|
||||
//
|
||||
// You can use this API to create or update the default server-side encryption for a bucket.
|
||||
func (obsClient ObsClient) SetBucketEncryption(input *SetBucketEncryptionInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketEncryptionInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketEncryption", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketEncryption gets the encryption configuration of a bucket.
|
||||
//
|
||||
// You can use this API to obtain obtain the encryption configuration of a bucket.
|
||||
func (obsClient ObsClient) GetBucketEncryption(bucketName string, extensions ...extensionOptions) (output *GetBucketEncryptionOutput, err error) {
|
||||
output = &GetBucketEncryptionOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketEncryption", HTTP_GET, bucketName, newSubResourceSerial(SubResourceEncryption), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketEncryption deletes the encryption configuration of a bucket.
|
||||
//
|
||||
// You can use this API to delete the encryption configuration of a bucket.
|
||||
func (obsClient ObsClient) DeleteBucketEncryption(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketEncryption", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceEncryption), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketTagging sets bucket tags.
|
||||
//
|
||||
// You can use this API to set bucket tags.
|
||||
func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketTaggingInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketTagging gets bucket tags.
|
||||
//
|
||||
// You can use this API to obtain the tags of a specified bucket.
|
||||
func (obsClient ObsClient) GetBucketTagging(bucketName string, extensions ...extensionOptions) (output *GetBucketTaggingOutput, err error) {
|
||||
output = &GetBucketTaggingOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketTagging deletes bucket tags.
|
||||
//
|
||||
// You can use this API to delete the tags of a specified bucket.
|
||||
func (obsClient ObsClient) DeleteBucketTagging(bucketName string, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketNotification sets event notification for a bucket.
|
||||
//
|
||||
// You can use this API to configure event notification for a bucket. You will be notified of all
|
||||
// specified operations performed on the bucket.
|
||||
func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketNotificationInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketNotification gets event notification settings of a bucket.
|
||||
//
|
||||
// You can use this API to obtain the event notification configuration of a bucket.
|
||||
func (obsClient ObsClient) GetBucketNotification(bucketName string, extensions ...extensionOptions) (output *GetBucketNotificationOutput, err error) {
|
||||
if obsClient.conf.signature != SignatureObs {
|
||||
return obsClient.getBucketNotificationS3(bucketName, extensions)
|
||||
}
|
||||
output = &GetBucketNotificationOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) getBucketNotificationS3(bucketName string, extensions []extensionOptions) (output *GetBucketNotificationOutput, err error) {
|
||||
outputS3 := &getBucketNotificationOutputS3{}
|
||||
err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), outputS3, extensions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output = &GetBucketNotificationOutput{}
|
||||
output.BaseModel = outputS3.BaseModel
|
||||
topicConfigurations := make([]TopicConfiguration, 0, len(outputS3.TopicConfigurations))
|
||||
for _, topicConfigurationS3 := range outputS3.TopicConfigurations {
|
||||
topicConfiguration := TopicConfiguration{}
|
||||
topicConfiguration.ID = topicConfigurationS3.ID
|
||||
topicConfiguration.Topic = topicConfigurationS3.Topic
|
||||
topicConfiguration.FilterRules = topicConfigurationS3.FilterRules
|
||||
|
||||
events := make([]EventType, 0, len(topicConfigurationS3.Events))
|
||||
for _, event := range topicConfigurationS3.Events {
|
||||
events = append(events, ParseStringToEventType(event))
|
||||
}
|
||||
topicConfiguration.Events = events
|
||||
topicConfigurations = append(topicConfigurations, topicConfiguration)
|
||||
}
|
||||
output.TopicConfigurations = topicConfigurations
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketRequestPayment sets requester-pays setting for a bucket.
|
||||
func (obsClient ObsClient) SetBucketRequestPayment(input *SetBucketRequestPaymentInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketRequestPaymentInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucket("SetBucketRequestPayment", HTTP_PUT, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketRequestPayment gets requester-pays setting of a bucket.
|
||||
func (obsClient ObsClient) GetBucketRequestPayment(bucketName string, extensions ...extensionOptions) (output *GetBucketRequestPaymentOutput, err error) {
|
||||
output = &GetBucketRequestPaymentOutput{}
|
||||
err = obsClient.doActionWithBucket("GetBucketRequestPayment", HTTP_GET, bucketName, newSubResourceSerial(SubResourceRequestPayment), output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketFetchPolicy sets the bucket fetch policy.
|
||||
//
|
||||
// You can use this API to set a bucket fetch policy.
|
||||
func (obsClient ObsClient) SetBucketFetchPolicy(input *SetBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketFetchPolicyInput is nil")
|
||||
}
|
||||
if strings.TrimSpace(string(input.Status)) == "" {
|
||||
return nil, errors.New("Fetch policy status is empty")
|
||||
}
|
||||
if strings.TrimSpace(input.Agency) == "" {
|
||||
return nil, errors.New("Fetch policy agency is empty")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketAndKey("SetBucketFetchPolicy", HTTP_PUT, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketFetchPolicy gets the bucket fetch policy.
|
||||
//
|
||||
// You can use this API to obtain the fetch policy of a bucket.
|
||||
func (obsClient ObsClient) GetBucketFetchPolicy(input *GetBucketFetchPolicyInput, extensions ...extensionOptions) (output *GetBucketFetchPolicyOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetBucketFetchPolicyInput is nil")
|
||||
}
|
||||
output = &GetBucketFetchPolicyOutput{}
|
||||
err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchPolicy", HTTP_GET, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketFetchPolicy deletes the bucket fetch policy.
|
||||
//
|
||||
// You can use this API to delete the fetch policy of a bucket.
|
||||
func (obsClient ObsClient) DeleteBucketFetchPolicy(input *DeleteBucketFetchPolicyInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("DeleteBucketFetchPolicyInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketAndKey("DeleteBucketFetchPolicy", HTTP_DELETE, input.Bucket, string(objectKeyExtensionPolicy), input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketFetchJob sets the bucket fetch job.
|
||||
//
|
||||
// You can use this API to set a bucket fetch job.
|
||||
func (obsClient ObsClient) SetBucketFetchJob(input *SetBucketFetchJobInput, extensions ...extensionOptions) (output *SetBucketFetchJobOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetBucketFetchJobInput is nil")
|
||||
}
|
||||
if strings.TrimSpace(input.URL) == "" {
|
||||
return nil, errors.New("URL is empty")
|
||||
}
|
||||
output = &SetBucketFetchJobOutput{}
|
||||
err = obsClient.doActionWithBucketAndKeyV2("SetBucketFetchJob", HTTP_POST, input.Bucket, string(objectKeyAsyncFetchJob), input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketFetchJob gets the bucket fetch job.
|
||||
//
|
||||
// You can use this API to obtain the fetch job of a bucket.
|
||||
func (obsClient ObsClient) GetBucketFetchJob(input *GetBucketFetchJobInput, extensions ...extensionOptions) (output *GetBucketFetchJobOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetBucketFetchJobInput is nil")
|
||||
}
|
||||
if strings.TrimSpace(input.JobID) == "" {
|
||||
return nil, errors.New("JobID is empty")
|
||||
}
|
||||
output = &GetBucketFetchJobOutput{}
|
||||
err = obsClient.doActionWithBucketAndKeyV2("GetBucketFetchJob", HTTP_GET, input.Bucket, string(objectKeyAsyncFetchJob)+"/"+input.JobID, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
506
myhwoss/obs/client_object.go
Normal file
506
myhwoss/obs/client_object.go
Normal file
@@ -0,0 +1,506 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ListObjects lists objects in a bucket.
|
||||
//
|
||||
// You can use this API to list objects in a bucket. By default, a maximum of 1000 objects are listed.
|
||||
func (obsClient ObsClient) ListObjects(input *ListObjectsInput, extensions ...extensionOptions) (output *ListObjectsOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("ListObjectsInput is nil")
|
||||
}
|
||||
output = &ListObjectsOutput{}
|
||||
err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
|
||||
output.Location = location[0]
|
||||
}
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeListObjectsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListVersions lists versioning objects in a bucket.
|
||||
//
|
||||
// You can use this API to list versioning objects in a bucket. By default, a maximum of 1000 versioning objects are listed.
|
||||
func (obsClient ObsClient) ListVersions(input *ListVersionsInput, extensions ...extensionOptions) (output *ListVersionsOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("ListVersionsInput is nil")
|
||||
}
|
||||
output = &ListVersionsOutput{}
|
||||
err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
|
||||
output.Location = location[0]
|
||||
}
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeListVersionsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HeadObject checks whether an object exists.
|
||||
//
|
||||
// You can use this API to check whether an object exists.
|
||||
func (obsClient ObsClient) HeadObject(input *HeadObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("HeadObjectInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketAndKey("HeadObject", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetObjectMetadata sets object metadata.
|
||||
func (obsClient ObsClient) SetObjectMetadata(input *SetObjectMetadataInput, extensions ...extensionOptions) (output *SetObjectMetadataOutput, err error) {
|
||||
output = &SetObjectMetadataOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("SetObjectMetadata", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseSetObjectMetadataOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteObject deletes an object.
|
||||
//
|
||||
// You can use this API to delete an object from a specified bucket.
|
||||
func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput, extensions ...extensionOptions) (output *DeleteObjectOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("DeleteObjectInput is nil")
|
||||
}
|
||||
output = &DeleteObjectOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseDeleteObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteObjects deletes objects in a batch.
|
||||
//
|
||||
// You can use this API to batch delete objects from a specified bucket.
|
||||
func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput, extensions ...extensionOptions) (output *DeleteObjectsOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("DeleteObjectsInput is nil")
|
||||
}
|
||||
output = &DeleteObjectsOutput{}
|
||||
err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else if output.EncodingType == "url" {
|
||||
err = decodeDeleteObjectsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetObjectAcl sets ACL for an object.
|
||||
//
|
||||
// You can use this API to set the ACL for an object in a specified bucket.
|
||||
func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("SetObjectAclInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetObjectAcl gets the ACL of an object.
|
||||
//
|
||||
// You can use this API to obtain the ACL of an object in a specified bucket.
|
||||
func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput, extensions ...extensionOptions) (output *GetObjectAclOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetObjectAclInput is nil")
|
||||
}
|
||||
output = &GetObjectAclOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
|
||||
output.VersionId = versionID[0]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RestoreObject restores an object.
|
||||
func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("RestoreObjectInput is nil")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetObjectMetadata gets object metadata.
|
||||
//
|
||||
// You can use this API to send a HEAD request to the object of a specified bucket to obtain its metadata.
|
||||
func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetObjectMetadataInput is nil")
|
||||
}
|
||||
output = &GetObjectMetadataOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetObjectMetadataOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) GetAttribute(input *GetAttributeInput, extensions ...extensionOptions) (output *GetAttributeOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetAttributeInput is nil")
|
||||
}
|
||||
output = &GetAttributeOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("GetAttribute", HTTP_HEAD, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetAttributeOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetObject downloads object.
|
||||
//
|
||||
// You can use this API to download an object in a specified bucket.
|
||||
func (obsClient ObsClient) GetObject(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetObjectInput is nil")
|
||||
}
|
||||
output = &GetObjectOutput{}
|
||||
err = obsClient.doActionWithBucketAndKeyWithProgress(GET_OBJECT, HTTP_GET, input.Bucket, input.Key, input, output, extensions, nil)
|
||||
if err != nil {
|
||||
output = nil
|
||||
return
|
||||
}
|
||||
|
||||
ParseGetObjectOutput(output)
|
||||
listener := obsClient.getProgressListener(extensions)
|
||||
if listener != nil {
|
||||
output.Body = TeeReader(output.Body, output.ContentLength, listener, nil)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) GetObjectWithoutProgress(input *GetObjectInput, extensions ...extensionOptions) (output *GetObjectOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("GetObjectInput is nil")
|
||||
}
|
||||
output = &GetObjectOutput{}
|
||||
err = obsClient.doActionWithBucketAndKeyWithProgress(GET_OBJECT, HTTP_GET, input.Bucket, input.Key, input, output, extensions, nil)
|
||||
if err != nil {
|
||||
output = nil
|
||||
return
|
||||
}
|
||||
|
||||
ParseGetObjectOutput(output)
|
||||
return
|
||||
}
|
||||
|
||||
// PutObject uploads an object to the specified bucket.
//
// The request is retried only when the body is rewindable (*strings.Reader);
// other readers take the unrepeatable request path. On success the parsed
// output carries the object's URL built from the client endpoint.
func (obsClient ObsClient) PutObject(input *PutObjectInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("PutObjectInput is nil")
	}

	// Infer the Content-Type from the object key's extension when the caller
	// did not set one explicitly.
	if input.ContentType == "" && input.Key != "" {
		if contentType, ok := GetContentType(input.Key); ok {
			input.ContentType = contentType
		}
	}
	output = &PutObjectOutput{}
	var repeatable bool
	if input.Body != nil {
		// A *strings.Reader can be rewound, so the request may safely be retried.
		if _, ok := input.Body.(*strings.Reader); ok {
			repeatable = true
		}
		// Cap the number of bytes read from Body at ContentLength.
		if input.ContentLength > 0 {
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
		}
	}

	listener := obsClient.getProgressListener(extensions)
	if repeatable {
		err = obsClient.doActionWithBucketAndKeyWithProgress(PUT_OBJECT, HTTP_PUT, input.Bucket, input.Key, input, output, extensions, listener)
	} else {
		// Non-rewindable bodies must not be retried.
		err = obsClient.doActionWithBucketAndKeyUnRepeatableWithProgress(PUT_OBJECT, HTTP_PUT, input.Bucket, input.Key, input, output, extensions, listener)
	}
	if err != nil {
		output = nil
		return
	}
	ParsePutObjectOutput(output)
	output.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
	return
}
|
||||
|
||||
func (obsClient ObsClient) getContentType(input *PutObjectInput, sourceFile string) (contentType string) {
|
||||
if contentType, ok := GetContentType(input.Key); ok {
|
||||
return contentType
|
||||
}
|
||||
if contentType, ok := GetContentType(sourceFile); ok {
|
||||
return contentType
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) isGetContentType(input *PutObjectInput) bool {
|
||||
if input.ContentType == "" && input.Key != "" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) NewFolder(input *NewFolderInput, extensions ...extensionOptions) (output *NewFolderOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("NewFolderInput is nil")
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(input.Key, "/") {
|
||||
input.Key += "/"
|
||||
}
|
||||
|
||||
output = &NewFolderOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("NewFolder", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseNewFolderOutput(output)
|
||||
output.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PutFile uploads a file to the specified bucket.
//
// When SourceFile is non-empty the file is opened, wrapped in a
// fileReaderWrapper sized by ContentLength (clamped to the file size) or the
// whole file, and streamed as the request body; the handle is closed when the
// request finishes. The call is delegated to the PutObject request path.
func (obsClient ObsClient) PutFile(input *PutFileInput, extensions ...extensionOptions) (output *PutObjectOutput, err error) {
	if input == nil {
		return nil, errors.New("PutFileInput is nil")
	}

	var body io.Reader
	sourceFile := strings.TrimSpace(input.SourceFile)
	if sourceFile != "" {
		fd, _err := os.Open(sourceFile)
		if _err != nil {
			err = _err
			return nil, err
		}
		// Close the file once the upload request has completed.
		defer func() {
			errMsg := fd.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
			}
		}()

		stat, _err := fd.Stat()
		if _err != nil {
			err = _err
			return nil, err
		}
		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
		fileReaderWrapper.reader = fd
		if input.ContentLength > 0 {
			// Never claim to upload more bytes than the file holds.
			if input.ContentLength > stat.Size() {
				input.ContentLength = stat.Size()
			}
			fileReaderWrapper.totalCount = input.ContentLength
		} else {
			fileReaderWrapper.totalCount = stat.Size()
		}
		body = fileReaderWrapper
	}

	// Delegate to the PutObject request path with the file as the body.
	_input := &PutObjectInput{}
	_input.PutObjectBasicInput = input.PutObjectBasicInput
	_input.Body = body

	// Infer Content-Type from the object key or, failing that, the file name.
	if obsClient.isGetContentType(_input) {
		_input.ContentType = obsClient.getContentType(_input, sourceFile)
	}
	listener := obsClient.getProgressListener(extensions)
	output = &PutObjectOutput{}
	err = obsClient.doActionWithBucketAndKeyWithProgress(PUT_FILE, HTTP_PUT, _input.Bucket, _input.Key, _input, output, extensions, listener)

	if err != nil {
		output = nil
		return
	}

	ParsePutObjectOutput(output)
	output.ObjectUrl = fmt.Sprintf("%s/%s/%s", obsClient.conf.endpoint, input.Bucket, input.Key)
	return
}
|
||||
|
||||
// CopyObject creates a copy for an existing object.
|
||||
//
|
||||
// You can use this API to create a copy for an object in a specified bucket.
|
||||
func (obsClient ObsClient) CopyObject(input *CopyObjectInput, extensions ...extensionOptions) (output *CopyObjectOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("CopyObjectInput is nil")
|
||||
}
|
||||
|
||||
if strings.TrimSpace(input.CopySourceBucket) == "" {
|
||||
return nil, errors.New("Source bucket is empty")
|
||||
}
|
||||
if strings.TrimSpace(input.CopySourceKey) == "" {
|
||||
return nil, errors.New("Source key is empty")
|
||||
}
|
||||
|
||||
output = &CopyObjectOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseCopyObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) AppendObject(input *AppendObjectInput, extensions ...extensionOptions) (output *AppendObjectOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("AppendObjectInput is nil")
|
||||
}
|
||||
|
||||
if input.ContentType == "" && input.Key != "" {
|
||||
if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
|
||||
input.ContentType = contentType
|
||||
}
|
||||
}
|
||||
output = &AppendObjectOutput{}
|
||||
var repeatable bool
|
||||
if input.Body != nil {
|
||||
if _, ok := input.Body.(*strings.Reader); ok {
|
||||
repeatable = true
|
||||
}
|
||||
if input.ContentLength > 0 {
|
||||
input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
|
||||
}
|
||||
}
|
||||
listener := obsClient.getProgressListener(extensions)
|
||||
|
||||
if repeatable {
|
||||
err = obsClient.doActionWithBucketAndKeyWithProgress(APPEND_OBJECT, HTTP_POST, input.Bucket, input.Key, input, output, extensions, listener)
|
||||
} else {
|
||||
err = obsClient.doActionWithBucketAndKeyUnRepeatableWithProgress(APPEND_OBJECT, HTTP_POST, input.Bucket, input.Key, input, output, extensions, listener)
|
||||
}
|
||||
|
||||
if err != nil || ParseAppendObjectOutput(output) != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) ModifyObject(input *ModifyObjectInput, extensions ...extensionOptions) (output *ModifyObjectOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("ModifyObjectInput is nil")
|
||||
}
|
||||
|
||||
output = &ModifyObjectOutput{}
|
||||
var repeatable bool
|
||||
if input.Body != nil {
|
||||
if _, ok := input.Body.(*strings.Reader); ok {
|
||||
repeatable = true
|
||||
}
|
||||
if input.ContentLength > 0 {
|
||||
input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
|
||||
}
|
||||
}
|
||||
if repeatable {
|
||||
err = obsClient.doActionWithBucketAndKey("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
|
||||
} else {
|
||||
err = obsClient.doActionWithBucketAndKeyUnRepeatable("ModifyObject", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
|
||||
}
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseModifyObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) RenameFile(input *RenameFileInput, extensions ...extensionOptions) (output *RenameFileOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("RenameFileInput is nil")
|
||||
}
|
||||
|
||||
output = &RenameFileOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("RenameFile", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) RenameFolder(input *RenameFolderInput, extensions ...extensionOptions) (output *RenameFolderOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("RenameFolderInput is nil")
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(input.Key, "/") {
|
||||
input.Key += "/"
|
||||
}
|
||||
if !strings.HasSuffix(input.NewObjectKey, "/") {
|
||||
input.NewObjectKey += "/"
|
||||
}
|
||||
output = &RenameFolderOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("RenameFolder", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
49
myhwoss/obs/client_other.go
Normal file
49
myhwoss/obs/client_other.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Refresh refreshes ak, sk and securityToken for obsClient.
|
||||
func (obsClient ObsClient) Refresh(ak, sk, securityToken string) {
|
||||
for _, sp := range obsClient.conf.securityProviders {
|
||||
if bsp, ok := sp.(*BasicSecurityProvider); ok {
|
||||
bsp.refresh(strings.TrimSpace(ak), strings.TrimSpace(sk), strings.TrimSpace(securityToken))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) getSecurity() securityHolder {
|
||||
if obsClient.conf.securityProviders != nil {
|
||||
for _, sp := range obsClient.conf.securityProviders {
|
||||
if sp == nil {
|
||||
continue
|
||||
}
|
||||
sh := sp.getSecurity()
|
||||
if sh.ak != "" && sh.sk != "" {
|
||||
return sh
|
||||
}
|
||||
}
|
||||
}
|
||||
return emptySecurityHolder
|
||||
}
|
||||
|
||||
// Close closes ObsClient.
// After Close the client must not be used again: the HTTP client and the
// configuration are dropped.
func (obsClient *ObsClient) Close() {
	obsClient.httpClient = nil
	// Release idle keep-alive connections held by the transport before the
	// configuration (which owns it) is discarded.
	obsClient.conf.transport.CloseIdleConnections()
	obsClient.conf = nil
}
|
252
myhwoss/obs/client_part.go
Normal file
252
myhwoss/obs/client_part.go
Normal file
@@ -0,0 +1,252 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ListMultipartUploads lists the multipart uploads.
|
||||
//
|
||||
// You can use this API to list the multipart uploads that are initialized but not combined or aborted in a specified bucket.
|
||||
func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput, extensions ...extensionOptions) (output *ListMultipartUploadsOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("ListMultipartUploadsInput is nil")
|
||||
}
|
||||
output = &ListMultipartUploadsOutput{}
|
||||
err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else if output.EncodingType == "url" {
|
||||
err = decodeListMultipartUploadsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AbortMultipartUpload aborts a multipart upload in a specified bucket by using the multipart upload ID.
|
||||
func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput, extensions ...extensionOptions) (output *BaseModel, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("AbortMultipartUploadInput is nil")
|
||||
}
|
||||
if input.UploadId == "" {
|
||||
return nil, errors.New("UploadId is empty")
|
||||
}
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InitiateMultipartUpload initializes a multipart upload.
|
||||
func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput, extensions ...extensionOptions) (output *InitiateMultipartUploadOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("InitiateMultipartUploadInput is nil")
|
||||
}
|
||||
|
||||
if input.ContentType == "" && input.Key != "" {
|
||||
if contentType, ok := mimeTypes[strings.ToLower(input.Key[strings.LastIndex(input.Key, ".")+1:])]; ok {
|
||||
input.ContentType = contentType
|
||||
}
|
||||
}
|
||||
|
||||
output = &InitiateMultipartUploadOutput{}
|
||||
err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseInitiateMultipartUploadOutput(output)
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeInitiateMultipartUploadOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UploadPart uploads a part to a specified bucket by using a specified multipart upload ID.
//
// After a multipart upload is initialized, you can use this API to upload a part to a specified bucket
// by using the multipart upload ID. Except for the last uploaded part whose size ranges from 0 to 5 GB,
// sizes of the other parts range from 100 KB to 5 GB. The upload part ID ranges from 1 to 10000.
func (obsClient ObsClient) UploadPart(_input *UploadPartInput, extensions ...extensionOptions) (output *UploadPartOutput, err error) {
	if _input == nil {
		return nil, errors.New("UploadPartInput is nil")
	}

	if _input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}

	// Work on a shallow copy so the caller's input struct is never mutated
	// (Body, Offset and PartSize may be rewritten below).
	input := &UploadPartInput{}
	input.Bucket = _input.Bucket
	input.Key = _input.Key
	input.PartNumber = _input.PartNumber
	input.UploadId = _input.UploadId
	input.ContentMD5 = _input.ContentMD5
	input.SourceFile = _input.SourceFile
	input.Offset = _input.Offset
	input.PartSize = _input.PartSize
	input.SseHeader = _input.SseHeader
	input.Body = _input.Body

	output = &UploadPartOutput{}
	// repeatable marks whether the request body can safely be re-sent on retry.
	var repeatable bool
	if input.Body != nil {
		if _, ok := input.Body.(*strings.Reader); ok {
			// An in-memory strings.Reader makes retries safe.
			repeatable = true
		}
		if _, ok := input.Body.(*readerWrapper); !ok && input.PartSize > 0 {
			// Limit the supplied body to exactly PartSize bytes.
			input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize}
		}
	} else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" {
		// No explicit body: stream the part from a section of a local file.
		fd, _err := os.Open(sourceFile)
		if _err != nil {
			err = _err
			return nil, err
		}
		defer func() {
			errMsg := fd.Close()
			if errMsg != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
			}
		}()

		stat, _err := fd.Stat()
		if _err != nil {
			err = _err
			return nil, err
		}
		fileSize := stat.Size()
		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
		fileReaderWrapper.reader = fd

		// Clamp an out-of-range offset back to the start of the file.
		if input.Offset < 0 || input.Offset > fileSize {
			input.Offset = 0
		}

		// Default/limit the part size to the bytes remaining after the offset.
		if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) {
			input.PartSize = fileSize - input.Offset
		}
		fileReaderWrapper.totalCount = input.PartSize
		if _, err = fd.Seek(input.Offset, io.SeekStart); err != nil {
			return nil, err
		}
		input.Body = fileReaderWrapper
		// File-backed bodies are treated as repeatable (presumably re-readable
		// from filePath on retry — see fileReaderWrapper; confirm).
		repeatable = true
	}
	if repeatable {
		err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	} else {
		err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	}
	if err != nil {
		output = nil
	} else {
		ParseUploadPartOutput(output)
		// Echo the part number back so callers can collect parts for completion.
		output.PartNumber = input.PartNumber
	}
	return
}
|
||||
|
||||
// CompleteMultipartUpload combines the uploaded parts in a specified bucket by using the multipart upload ID.
func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	if input == nil {
		return nil, errors.New("CompleteMultipartUploadInput is nil")
	}

	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}

	// Sort the parts in place using partSlice's ordering (presumably by part
	// number, as the service requires — confirm against partSlice.Less).
	var parts partSlice = input.Parts
	sort.Sort(parts)

	output = &CompleteMultipartUploadOutput{}
	err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		ParseCompleteMultipartUploadOutput(output)
		if output.EncodingType == "url" {
			// Decode URL-encoded response fields before returning.
			err = decodeCompleteMultipartUploadOutput(output)
			if err != nil {
				doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err)
				output = nil
			}
		}
	}
	return
}
|
||||
|
||||
// ListParts lists the uploaded parts in a bucket by using the multipart upload ID.
func (obsClient ObsClient) ListParts(input *ListPartsInput, extensions ...extensionOptions) (output *ListPartsOutput, err error) {
	if input == nil {
		return nil, errors.New("ListPartsInput is nil")
	}
	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}
	output = &ListPartsOutput{}
	err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, output, extensions)
	if err != nil {
		output = nil
	} else if output.EncodingType == "url" {
		// Decode URL-encoded response fields before returning.
		err = decodeListPartsOutput(output)
		if err != nil {
			doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
			output = nil
		}
	}
	return
}
|
||||
|
||||
// CopyPart copy a part to a specified bucket by using a specified multipart upload ID.
//
// After a multipart upload is initialized, you can use this API to copy a part to a specified bucket by using the multipart upload ID.
func (obsClient ObsClient) CopyPart(input *CopyPartInput, extensions ...extensionOptions) (output *CopyPartOutput, err error) {
	// Validate all mandatory fields up front.
	if input == nil {
		return nil, errors.New("CopyPartInput is nil")
	}
	if input.UploadId == "" {
		return nil, errors.New("UploadId is empty")
	}
	if strings.TrimSpace(input.CopySourceBucket) == "" {
		return nil, errors.New("Source bucket is empty")
	}
	if strings.TrimSpace(input.CopySourceKey) == "" {
		return nil, errors.New("Source key is empty")
	}

	output = &CopyPartOutput{}
	err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, output, extensions)
	if err != nil {
		output = nil
	} else {
		ParseCopyPartOutput(output)
		// Echo the part number back for part bookkeeping on the caller side.
		output.PartNumber = input.PartNumber
	}
	return
}
|
59
myhwoss/obs/client_resume.go
Normal file
59
myhwoss/obs/client_resume.go
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs

import "errors"
|
||||
|
||||
// UploadFile resume uploads.
|
||||
//
|
||||
// This API is an encapsulated and enhanced version of multipart upload, and aims to eliminate large file
|
||||
// upload failures caused by poor network conditions and program breakdowns.
|
||||
func (obsClient ObsClient) UploadFile(input *UploadFileInput, extensions ...extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
|
||||
if input.EnableCheckpoint && input.CheckpointFile == "" {
|
||||
input.CheckpointFile = input.UploadFile + ".uploadfile_record"
|
||||
}
|
||||
|
||||
if input.TaskNum <= 0 {
|
||||
input.TaskNum = 1
|
||||
}
|
||||
if input.PartSize < MIN_PART_SIZE {
|
||||
input.PartSize = MIN_PART_SIZE
|
||||
} else if input.PartSize > MAX_PART_SIZE {
|
||||
input.PartSize = MAX_PART_SIZE
|
||||
}
|
||||
|
||||
output, err = obsClient.resumeUpload(input, extensions)
|
||||
return
|
||||
}
|
||||
|
||||
// DownloadFile resume downloads.
|
||||
//
|
||||
// This API is an encapsulated and enhanced version of partial download, and aims to eliminate large file
|
||||
// download failures caused by poor network conditions and program breakdowns.
|
||||
func (obsClient ObsClient) DownloadFile(input *DownloadFileInput, extensions ...extensionOptions) (output *GetObjectMetadataOutput, err error) {
|
||||
if input.DownloadFile == "" {
|
||||
input.DownloadFile = input.Key
|
||||
}
|
||||
|
||||
if input.EnableCheckpoint && input.CheckpointFile == "" {
|
||||
input.CheckpointFile = input.DownloadFile + ".downloadfile_record"
|
||||
}
|
||||
|
||||
if input.TaskNum <= 0 {
|
||||
input.TaskNum = 1
|
||||
}
|
||||
if input.PartSize <= 0 {
|
||||
input.PartSize = DEFAULT_PART_SIZE
|
||||
}
|
||||
|
||||
output, err = obsClient.resumeDownload(input, extensions)
|
||||
return
|
||||
}
|
561
myhwoss/obs/conf.go
Normal file
561
myhwoss/obs/conf.go
Normal file
@@ -0,0 +1,561 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http/httpproxy"
|
||||
)
|
||||
|
||||
// urlHolder captures the parsed pieces of the configured endpoint URL.
type urlHolder struct {
	scheme string // "http" or "https"
	host   string // host name or IP address, without the port
	port   int    // explicit port, or 443/80 by scheme when unset
}
|
||||
|
||||
// config holds all tunable settings for an ObsClient; it is populated by the
// With* configurers and normalized by prepareConfig/initConfigWithDefault.
type config struct {
	securityProviders []securityProvider // credential sources, iterated in order
	urlHolder         *urlHolder         // parsed endpoint (scheme/host/port)
	pathStyle         bool               // path-style vs virtual-host bucket addressing
	cname             bool               // endpoint is a custom domain bound to the bucket
	sslVerify         bool               // verify server TLS certificates
	endpoint          string
	signature         SignatureType
	region            string
	connectTimeout    int // seconds
	socketTimeout     int // seconds
	headerTimeout     int // seconds
	idleConnTimeout   int // seconds
	finalTimeout      int // seconds; derived as socketTimeout*10 in prepareConfig
	maxRetryCount     int
	proxyURL          string
	noProxyURL        string
	proxyFromEnv      bool // read proxy settings from the process environment
	maxConnsPerHost   int
	pemCerts          []byte // custom CA bundle used when sslVerify is on
	transport         *http.Transport
	roundTripper      http.RoundTripper
	httpClient        *http.Client
	ctx               context.Context // NOTE(review): context stored in a struct; per-request ctx is the usual Go idiom — confirm intent
	maxRedirectCount  int
	userAgent         string
	enableCompression bool
	progressListener  ProgressListener

	// Lazily built proxy resolver for proxyURL/noProxyURL (see customProxyFunc).
	customProxyOnce      sync.Once
	customProxyFuncValue func(*url.URL) (*url.URL, error)
}
|
||||
|
||||
func (conf config) String() string {
|
||||
return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
|
||||
"\nconnectTimeout:%d, socketTimeout:%dheaderTimeout:%d, idleConnTimeout:%d"+
|
||||
"\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, maxRedirectCount:%d]",
|
||||
conf.endpoint, conf.signature, conf.pathStyle, conf.region,
|
||||
conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
|
||||
conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.maxRedirectCount,
|
||||
)
|
||||
}
|
||||
|
||||
// configurer mutates a config; values of this type are produced by the With* option helpers.
type configurer func(conf *config)
|
||||
|
||||
// WithSecurityProviders is a configurer for ObsClient to append the given
// non-nil security providers used to resolve access credentials.
func WithSecurityProviders(sps ...securityProvider) configurer {
	return func(conf *config) {
		for _, sp := range sps {
			if sp != nil { // silently skip nil entries
				conf.securityProviders = append(conf.securityProviders, sp)
			}
		}
	}
}
|
||||
|
||||
// WithSslVerify is a wrapper for WithSslVerifyAndPemCerts with no custom CA certificates.
func WithSslVerify(sslVerify bool) configurer {
	return WithSslVerifyAndPemCerts(sslVerify, nil)
}

// WithSslVerifyAndPemCerts is a configurer for ObsClient to set conf.sslVerify and
// conf.pemCerts (a PEM-encoded CA bundle used when verification is enabled).
func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
	return func(conf *config) {
		conf.sslVerify = sslVerify
		conf.pemCerts = pemCerts
	}
}

// WithHeaderTimeout is a configurer for ObsClient to set the timeout period of
// obtaining the response headers, in seconds.
func WithHeaderTimeout(headerTimeout int) configurer {
	return func(conf *config) {
		conf.headerTimeout = headerTimeout
	}
}
|
||||
|
||||
// WithProxyUrl is a configurer for ObsClient to set the HTTP/HTTPS proxy URL.
func WithProxyUrl(proxyURL string) configurer {
	return func(conf *config) {
		conf.proxyURL = proxyURL
	}
}

// WithNoProxyUrl is a configurer for ObsClient to set hosts that bypass the
// proxy (the HTTP no_proxy equivalent).
func WithNoProxyUrl(noProxyURL string) configurer {
	return func(conf *config) {
		conf.noProxyURL = noProxyURL
	}
}

// WithProxyFromEnv is a configurer for ObsClient to read proxy settings from
// the process environment.
func WithProxyFromEnv(proxyFromEnv bool) configurer {
	return func(conf *config) {
		conf.proxyFromEnv = proxyFromEnv
	}
}
|
||||
|
||||
// WithMaxConnections is a configurer for ObsClient to set the maximum number of
// idle HTTP connections.
func WithMaxConnections(maxConnsPerHost int) configurer {
	return func(conf *config) {
		conf.maxConnsPerHost = maxConnsPerHost
	}
}

// WithPathStyle is a configurer for ObsClient to address buckets by URL path
// instead of virtual-host style.
func WithPathStyle(pathStyle bool) configurer {
	return func(conf *config) {
		conf.pathStyle = pathStyle
	}
}

// WithSignature is a configurer for ObsClient to select the request signature type.
func WithSignature(signature SignatureType) configurer {
	return func(conf *config) {
		conf.signature = signature
	}
}

// WithRegion is a configurer for ObsClient to set the service region.
func WithRegion(region string) configurer {
	return func(conf *config) {
		conf.region = region
	}
}
|
||||
|
||||
// WithConnectTimeout is a configurer for ObsClient to set timeout period for establishing
// an http/https connection, in seconds.
func WithConnectTimeout(connectTimeout int) configurer {
	return func(conf *config) {
		conf.connectTimeout = connectTimeout
	}
}

// WithSocketTimeout is a configurer for ObsClient to set the timeout duration for transmitting data at
// the socket layer, in seconds.
func WithSocketTimeout(socketTimeout int) configurer {
	return func(conf *config) {
		conf.socketTimeout = socketTimeout
	}
}

// WithIdleConnTimeout is a configurer for ObsClient to set the timeout period of an idle HTTP connection
// in the connection pool, in seconds.
func WithIdleConnTimeout(idleConnTimeout int) configurer {
	return func(conf *config) {
		conf.idleConnTimeout = idleConnTimeout
	}
}

// WithMaxRetryCount is a configurer for ObsClient to set the maximum number of retries when an
// HTTP/HTTPS connection is abnormal.
func WithMaxRetryCount(maxRetryCount int) configurer {
	return func(conf *config) {
		conf.maxRetryCount = maxRetryCount
	}
}
|
||||
|
||||
// WithSecurityToken is a configurer for ObsClient to set the security token in
// the temporary access keys. It refreshes only the first BasicSecurityProvider
// found, keeping its current AK/SK and replacing the token.
func WithSecurityToken(securityToken string) configurer {
	return func(conf *config) {
		for _, sp := range conf.securityProviders {
			if bsp, ok := sp.(*BasicSecurityProvider); ok {
				sh := bsp.getSecurity()
				bsp.refresh(sh.ak, sh.sk, securityToken)
				break // only the first basic provider is updated
			}
		}
	}
}
|
||||
|
||||
// WithHttpTransport is a configurer for ObsClient to set a customized http.Transport.
func WithHttpTransport(transport *http.Transport) configurer {
	return func(conf *config) {
		conf.transport = transport
	}
}

// WithHttpClient is a configurer for ObsClient to set a custom http.Client.
func WithHttpClient(httpClient *http.Client) configurer {
	return func(conf *config) {
		conf.httpClient = httpClient
	}
}

// WithRequestContext is a configurer for ObsClient to set the context for each HTTP request.
func WithRequestContext(ctx context.Context) configurer {
	return func(conf *config) {
		conf.ctx = ctx
	}
}
|
||||
|
||||
// WithCustomDomainName is a configurer for ObsClient to enable addressing via a
// custom domain name (CNAME).
func WithCustomDomainName(cname bool) configurer {
	return func(conf *config) {
		conf.cname = cname
	}
}

// WithMaxRedirectCount is a configurer for ObsClient to set the maximum number of times that the request is redirected.
func WithMaxRedirectCount(maxRedirectCount int) configurer {
	return func(conf *config) {
		conf.maxRedirectCount = maxRedirectCount
	}
}

// WithUserAgent is a configurer for ObsClient to set the User-Agent.
func WithUserAgent(userAgent string) configurer {
	return func(conf *config) {
		conf.userAgent = userAgent
	}
}

// WithEnableCompression is a configurer for ObsClient to set the Transport.DisableCompression
// (stored inverted: enableCompression=true clears DisableCompression).
func WithEnableCompression(enableCompression bool) configurer {
	return func(conf *config) {
		conf.enableCompression = enableCompression
	}
}
|
||||
|
||||
// prepareConfig normalizes timeout/retry/connection settings, substituting the
// package defaults for values that were left unset or are out of range.
func (conf *config) prepareConfig() {
	if conf.connectTimeout <= 0 {
		conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT
	}

	if conf.socketTimeout <= 0 {
		conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT
	}

	// finalTimeout is a hard upper bound derived from the socket timeout.
	conf.finalTimeout = conf.socketTimeout * 10

	if conf.headerTimeout <= 0 {
		conf.headerTimeout = DEFAULT_HEADER_TIMEOUT
	}

	if conf.idleConnTimeout < 0 {
		conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT
	}

	if conf.maxRetryCount < 0 {
		conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT
	}

	if conf.maxConnsPerHost <= 0 {
		conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST
	}

	if conf.maxRedirectCount < 0 {
		conf.maxRedirectCount = DEFAULT_MAX_REDIRECT_COUNT
	}

	// The OBS signature is not used together with path-style addressing;
	// fall back to signature v2 in that combination.
	if conf.pathStyle && conf.signature == SignatureObs {
		conf.signature = SignatureV2
	}
}
|
||||
|
||||
func (conf *config) initConfigWithDefault() error {
|
||||
conf.endpoint = strings.TrimSpace(conf.endpoint)
|
||||
if conf.endpoint == "" {
|
||||
return errors.New("endpoint is not set")
|
||||
}
|
||||
|
||||
if index := strings.Index(conf.endpoint, "?"); index > 0 {
|
||||
conf.endpoint = conf.endpoint[:index]
|
||||
}
|
||||
|
||||
for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 {
|
||||
conf.endpoint = conf.endpoint[:len(conf.endpoint)-1]
|
||||
}
|
||||
|
||||
if conf.signature == "" {
|
||||
conf.signature = DEFAULT_SIGNATURE
|
||||
}
|
||||
|
||||
urlHolder := &urlHolder{}
|
||||
var address string
|
||||
if strings.HasPrefix(conf.endpoint, "https://") {
|
||||
urlHolder.scheme = "https"
|
||||
address = conf.endpoint[len("https://"):]
|
||||
} else if strings.HasPrefix(conf.endpoint, "http://") {
|
||||
urlHolder.scheme = "http"
|
||||
address = conf.endpoint[len("http://"):]
|
||||
} else {
|
||||
urlHolder.scheme = "https"
|
||||
address = conf.endpoint
|
||||
}
|
||||
|
||||
addr := strings.Split(address, ":")
|
||||
if len(addr) == 2 {
|
||||
if port, err := strconv.Atoi(addr[1]); err == nil {
|
||||
urlHolder.port = port
|
||||
}
|
||||
}
|
||||
urlHolder.host = addr[0]
|
||||
if urlHolder.port == 0 {
|
||||
if urlHolder.scheme == "https" {
|
||||
urlHolder.port = 443
|
||||
} else {
|
||||
urlHolder.port = 80
|
||||
}
|
||||
}
|
||||
|
||||
if IsIP(urlHolder.host) {
|
||||
conf.pathStyle = true
|
||||
}
|
||||
|
||||
conf.urlHolder = urlHolder
|
||||
|
||||
conf.region = strings.TrimSpace(conf.region)
|
||||
if conf.region == "" {
|
||||
conf.region = DEFAULT_REGION
|
||||
}
|
||||
|
||||
conf.prepareConfig()
|
||||
conf.proxyURL = strings.TrimSpace(conf.proxyURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getTransport lazily builds the http.Transport used by the client, wiring in
// dial/read timeouts, connection-pool limits, the proxy resolver and TLS
// settings. A transport already supplied (e.g. via WithHttpTransport) is left
// untouched.
func (conf *config) getTransport() error {
	if conf.transport == nil {
		conf.transport = &http.Transport{
			Dial: func(network, addr string) (net.Conn, error) {
				conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout))
				if err != nil {
					return nil, err
				}
				// Wrap the connection with socket/final timeouts
				// (see getConnDelegate for exact deadline semantics).
				return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil
			},
			MaxIdleConns:          conf.maxConnsPerHost,
			MaxIdleConnsPerHost:   conf.maxConnsPerHost,
			ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout),
			IdleConnTimeout:       time.Second * time.Duration(conf.idleConnTimeout),
		}
		// An explicit proxy URL wins over environment-derived proxy settings.
		if conf.proxyURL != "" {
			conf.transport.Proxy = conf.customProxyFromEnvironment
		} else if conf.proxyFromEnv {
			conf.transport.Proxy = http.ProxyFromEnvironment
		}

		// sslVerify=false disables certificate verification entirely.
		tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
		if conf.sslVerify && conf.pemCerts != nil {
			// Trust only the caller-supplied CA bundle.
			pool := x509.NewCertPool()
			pool.AppendCertsFromPEM(conf.pemCerts)
			tlsConfig.RootCAs = pool
		}

		conf.transport.TLSClientConfig = tlsConfig
		conf.transport.DisableCompression = !conf.enableCompression
	}

	return nil
}
|
||||
|
||||
func (conf *config) customProxyFromEnvironment(req *http.Request) (*url.URL, error) {
|
||||
url, err := conf.customProxyFunc()(req.URL)
|
||||
return url, err
|
||||
}
|
||||
|
||||
// customProxyFunc lazily builds — exactly once per config — a proxy-resolution
// function from the configured proxyURL/noProxyURL, using httpproxy's
// environment semantics but sourced from the client configuration.
func (conf *config) customProxyFunc() func(*url.URL) (*url.URL, error) {
	conf.customProxyOnce.Do(func() {
		customhttpproxy := &httpproxy.Config{
			HTTPProxy:  conf.proxyURL,
			HTTPSProxy: conf.proxyURL, // same proxy serves both schemes
			NoProxy:    conf.noProxyURL,
			// Mirror net/http behavior: a set REQUEST_METHOD means CGI mode.
			CGI: os.Getenv("REQUEST_METHOD") != "",
		}
		conf.customProxyFuncValue = customhttpproxy.ProxyFunc()
	})
	return conf.customProxyFuncValue
}
|
||||
|
||||
// checkRedirectFunc stops the http.Client from following redirects
// automatically: returning http.ErrUseLastResponse hands the redirect
// response itself back to the caller with its body unclosed.
func checkRedirectFunc(req *http.Request, via []*http.Request) error {
	return http.ErrUseLastResponse
}
|
||||
|
||||
// DummyQueryEscape is a no-op escape function: it returns its input unchanged.
// It is used where an escape function is required by signature but no escaping
// should be applied (see prepareEscapeFunc).
func DummyQueryEscape(s string) string {
	return s
}
|
||||
|
||||
// prepareBaseURL builds the request base URL and the canonicalized resource
// path used for signing, according to the cname/path-style settings and the
// signature variant in use.
func (conf *config) prepareBaseURL(bucketName string) (requestURL string, canonicalizedURL string) {
	urlHolder := conf.urlHolder
	if conf.cname {
		// Custom domain: the host itself addresses the bucket.
		requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
		if conf.signature == "v4" {
			canonicalizedURL = "/"
		} else {
			canonicalizedURL = "/" + urlHolder.host + "/"
		}
	} else {
		if bucketName == "" {
			// Service-level request (no bucket in the URL).
			requestURL = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
			canonicalizedURL = "/"
		} else {
			if conf.pathStyle {
				// Path style: the bucket appears in the URL path.
				requestURL = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName)
				canonicalizedURL = "/" + bucketName
			} else {
				// Virtual-host style: the bucket is a subdomain of the endpoint.
				requestURL = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port)
				if conf.signature == "v2" || conf.signature == "OBS" {
					canonicalizedURL = "/" + bucketName + "/"
				} else {
					canonicalizedURL = "/"
				}
			}
		}
	}
	return
}
|
||||
|
||||
func (conf *config) prepareObjectKey(escape bool, objectKey string, escapeFunc func(s string) string) (encodeObjectKey string) {
|
||||
if escape {
|
||||
tempKey := []rune(objectKey)
|
||||
result := make([]string, 0, len(tempKey))
|
||||
for _, value := range tempKey {
|
||||
if string(value) == "/" {
|
||||
result = append(result, string(value))
|
||||
} else {
|
||||
if string(value) == " " {
|
||||
result = append(result, url.PathEscape(string(value)))
|
||||
} else {
|
||||
result = append(result, url.QueryEscape(string(value)))
|
||||
}
|
||||
}
|
||||
}
|
||||
encodeObjectKey = strings.Join(result, "")
|
||||
} else {
|
||||
encodeObjectKey = escapeFunc(objectKey)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// prepareEscapeFunc selects the whole-string escape function: url.QueryEscape
// when escaping is requested, otherwise the identity DummyQueryEscape.
func (conf *config) prepareEscapeFunc(escape bool) (escapeFunc func(s string) string) {
	if escape {
		return url.QueryEscape
	}
	return DummyQueryEscape
}
|
||||
|
||||
// formatUrls assembles the full request URL (base + escaped object key +
// sorted query parameters) and, in parallel, the canonicalized URL used for
// request signing. For non-v4 signatures only recognized sub-resource
// parameters (and vendor-prefixed ones) enter the canonicalized form.
func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string, escape bool) (requestURL string, canonicalizedURL string) {

	requestURL, canonicalizedURL = conf.prepareBaseURL(bucketName)
	var escapeFunc func(s string) string
	escapeFunc = conf.prepareEscapeFunc(escape)

	if objectKey != "" {
		var encodeObjectKey string
		encodeObjectKey = conf.prepareObjectKey(escape, objectKey, escapeFunc)
		requestURL += "/" + encodeObjectKey
		if !strings.HasSuffix(canonicalizedURL, "/") {
			canonicalizedURL += "/"
		}
		canonicalizedURL += encodeObjectKey
	}

	// Signing requires query parameters in lexicographic key order.
	keys := make([]string, 0, len(params))
	for key := range params {
		keys = append(keys, strings.TrimSpace(key))
	}
	sort.Strings(keys)
	// i counts parameters actually emitted into the canonicalized URL.
	i := 0

	for index, key := range keys {
		if index == 0 {
			requestURL += "?"
		} else {
			requestURL += "&"
		}
		_key := url.QueryEscape(key)
		requestURL += _key

		_value := params[key]
		if conf.signature == "v4" {
			// v4 emits every parameter with an (possibly empty) value.
			requestURL += "=" + url.QueryEscape(_value)
		} else {
			if _value != "" {
				requestURL += "=" + url.QueryEscape(_value)
				// _value now carries the leading "=" for the canonicalized form.
				_value = "=" + _value
			} else {
				_value = ""
			}
			// Only allowed sub-resources and x-amz-/x-obs- prefixed keys
			// participate in the v2/OBS canonicalized URL.
			lowerKey := strings.ToLower(key)
			_, ok := allowedResourceParameterNames[lowerKey]
			prefixHeader := HEADER_PREFIX
			isObs := conf.signature == SignatureObs
			if isObs {
				prefixHeader = HEADER_PREFIX_OBS
			}
			ok = ok || strings.HasPrefix(lowerKey, prefixHeader)
			if ok {
				if i == 0 {
					canonicalizedURL += "?"
				} else {
					canonicalizedURL += "&"
				}
				canonicalizedURL += getQueryURL(_key, _value)
				i++
			}
		}
	}
	return
}
|
||||
|
||||
// getQueryURL concatenates an already-escaped query key with its value part
// (the value either carries its leading "=" or is empty for flag parameters).
// Simplified from a three-step += build to a single concatenation.
func getQueryURL(key, value string) string {
	return key + value
}
|
||||
|
||||
// once guards the one-time application of extension configurers in
// GetClientConfigure; being package-level, it is shared by all clients.
var once sync.Once
|
||||
|
||||
// GetClientConfigure applies any configurer extensions to the client's config
// and returns it.
//
// NOTE(review): the sync.Once guard is package-level, so extensions are
// applied only on the very first call in the whole process — later calls
// (even on other ObsClient instances) return the config unchanged. Confirm
// this is the intended semantics.
func (obsClient ObsClient) GetClientConfigure(extensions []extensionOptions) *config {
	once.Do(func() {
		for _, extension := range extensions {
			// Non-configurer extensions are silently ignored here.
			if configure, ok := extension.(configurer); ok {
				configure(obsClient.conf)
			}
		}
	})
	return obsClient.conf
}
|
||||
|
||||
// getProgressListener returns the progress listener from the (possibly
// extension-modified) client configuration; nil when none was configured.
func (obsClient ObsClient) getProgressListener(extensions []extensionOptions) ProgressListener {
	return obsClient.GetClientConfigure(extensions).progressListener
}
|
292
myhwoss/obs/const.go
Normal file
292
myhwoss/obs/const.go
Normal file
@@ -0,0 +1,292 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
// Protocol constants for the OBS SDK. Names are part of the exported API and
// are kept verbatim (including historical misspellings such as
// HEADER_ACCESSS_KEY_AMZ and HEADER_ACCESS_CONRTOL_*) for compatibility.
const (
	// SDK identity.
	OBS_SDK_VERSION = "3.23.4"
	USER_AGENT = "obs-sdk-go/" + OBS_SDK_VERSION
	// Vendor header prefixes: AWS-compatible ("x-amz-") vs native OBS ("x-obs-").
	HEADER_PREFIX = "x-amz-"
	HEADER_PREFIX_META = "x-amz-meta-"
	HEADER_PREFIX_OBS = "x-obs-"
	HEADER_PREFIX_META_OBS = "x-obs-meta-"
	HEADER_DATE_AMZ = "x-amz-date"
	HEADER_DATE_OBS = "x-obs-date"
	HEADER_STS_TOKEN_AMZ = "x-amz-security-token"
	HEADER_STS_TOKEN_OBS = "x-obs-security-token"
	HEADER_ACCESSS_KEY_AMZ = "AWSAccessKeyId"
	PREFIX_META = "meta-"

	// Request/response header names (lowercase forms are used after the
	// vendor prefix has been stripped or for canonicalization).
	HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256"
	HEADER_ACL_AMZ = "x-amz-acl"
	HEADER_ACL_OBS = "x-obs-acl"
	HEADER_ACL = "acl"
	HEADER_LOCATION_AMZ = "location"
	HEADER_BUCKET_LOCATION_OBS = "bucket-location"
	HEADER_COPY_SOURCE = "copy-source"
	HEADER_COPY_SOURCE_RANGE = "copy-source-range"
	HEADER_RANGE = "Range"
	HEADER_STORAGE_CLASS = "x-default-storage-class"
	HEADER_STORAGE_CLASS_OBS = "x-obs-storage-class"
	HEADER_FS_FILE_INTERFACE_OBS = "x-obs-fs-file-interface"
	HEADER_MODE = "mode"
	HEADER_VERSION_OBS = "version"
	HEADER_REQUEST_PAYER = "x-amz-request-payer"
	HEADER_GRANT_READ_OBS = "grant-read"
	HEADER_GRANT_WRITE_OBS = "grant-write"
	HEADER_GRANT_READ_ACP_OBS = "grant-read-acp"
	HEADER_GRANT_WRITE_ACP_OBS = "grant-write-acp"
	HEADER_GRANT_FULL_CONTROL_OBS = "grant-full-control"
	HEADER_GRANT_READ_DELIVERED_OBS = "grant-read-delivered"
	HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS = "grant-full-control-delivered"
	HEADER_REQUEST_ID = "request-id"
	HEADER_ERROR_CODE = "error-code"
	HEADER_ERROR_MESSAGE = "error-message"
	HEADER_BUCKET_REGION = "bucket-region"
	HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin"
	HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers"
	HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age"
	HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods"
	HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers"
	HEADER_EPID_HEADERS = "epid"
	HEADER_VERSION_ID = "version-id"
	HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id"
	HEADER_DELETE_MARKER = "delete-marker"
	HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location"
	HEADER_METADATA_DIRECTIVE = "metadata-directive"
	HEADER_EXPIRATION = "expiration"
	HEADER_EXPIRES_OBS = "x-obs-expires"
	HEADER_RESTORE = "restore"
	HEADER_OBJECT_TYPE = "object-type"
	HEADER_NEXT_APPEND_POSITION = "next-append-position"
	HEADER_STORAGE_CLASS2 = "storage-class"
	HEADER_CONTENT_LENGTH = "content-length"
	HEADER_CONTENT_TYPE = "content-type"
	HEADER_CONTENT_LANGUAGE = "content-language"
	HEADER_EXPIRES = "expires"
	HEADER_CACHE_CONTROL = "cache-control"
	HEADER_CONTENT_DISPOSITION = "content-disposition"
	HEADER_CONTENT_ENCODING = "content-encoding"
	HEADER_AZ_REDUNDANCY = "az-redundancy"
	HEADER_BUCKET_TYPE = "bucket-type"
	HEADER_BUCKET_REDUNDANCY = "bucket-redundancy"
	HEADER_FUSION_ALLOW_UPGRADE = "fusion-allow-upgrade"
	HEADER_FUSION_ALLOW_ALT = "fusion-allow-alternative"
	headerOefMarker = "oef-marker"

	HEADER_ETAG = "etag"
	HEADER_LASTMODIFIED = "last-modified"

	// Conditional-copy headers (without vendor prefix).
	HEADER_COPY_SOURCE_IF_MATCH = "copy-source-if-match"
	HEADER_COPY_SOURCE_IF_NONE_MATCH = "copy-source-if-none-match"
	HEADER_COPY_SOURCE_IF_MODIFIED_SINCE = "copy-source-if-modified-since"
	HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE = "copy-source-if-unmodified-since"

	// Standard HTTP conditional headers (canonical casing).
	HEADER_IF_MATCH = "If-Match"
	HEADER_IF_NONE_MATCH = "If-None-Match"
	HEADER_IF_MODIFIED_SINCE = "If-Modified-Since"
	HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since"

	// Server-side encryption: SSE-C (customer key) and SSE-KMS.
	HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm"
	HEADER_SSEC_KEY = "server-side-encryption-customer-key"
	HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5"

	HEADER_SSEKMS_ENCRYPTION = "server-side-encryption"
	HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id"
	HEADER_SSEKMS_ENCRYPT_KEY_OBS = "server-side-encryption-kms-key-id"

	HEADER_SSEC_COPY_SOURCE_ENCRYPTION = "copy-source-server-side-encryption-customer-algorithm"
	HEADER_SSEC_COPY_SOURCE_KEY = "copy-source-server-side-encryption-customer-key"
	HEADER_SSEC_COPY_SOURCE_KEY_MD5 = "copy-source-server-side-encryption-customer-key-MD5"

	HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id"

	HEADER_SSEKMS_KEY_OBS = "x-obs-server-side-encryption-kms-key-id"

	HEADER_SUCCESS_ACTION_REDIRECT = "success_action_redirect"

	headerFSFileInterface = "fs-file-interface"

	// Canonically-cased ("Camel") HTTP header names used on outgoing requests.
	HEADER_DATE_CAMEL = "Date"
	HEADER_HOST_CAMEL = "Host"
	HEADER_HOST = "host"
	HEADER_AUTH_CAMEL = "Authorization"
	HEADER_MD5_CAMEL = "Content-MD5"
	HEADER_LOCATION_CAMEL = "Location"
	HEADER_CONTENT_LENGTH_CAMEL = "Content-Length"
	HEADER_CONTENT_TYPE_CAML = "Content-Type"
	HEADER_USER_AGENT_CAMEL = "User-Agent"
	HEADER_ORIGIN_CAMEL = "Origin"
	HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers"
	HEADER_CACHE_CONTROL_CAMEL = "Cache-Control"
	HEADER_CONTENT_DISPOSITION_CAMEL = "Content-Disposition"
	HEADER_CONTENT_ENCODING_CAMEL = "Content-Encoding"
	HEADER_CONTENT_LANGUAGE_CAMEL = "Content-Language"
	HEADER_EXPIRES_CAMEL = "Expires"
	HEADER_ACCEPT_ENCODING = "Accept-Encoding"

	// Query-string parameter names.
	PARAM_VERSION_ID = "versionId"
	PARAM_RESPONSE_CONTENT_TYPE = "response-content-type"
	PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language"
	PARAM_RESPONSE_EXPIRES = "response-expires"
	PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control"
	PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition"
	PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding"
	PARAM_IMAGE_PROCESS = "x-image-process"

	// Presigned-URL (signature V4) query parameter names.
	PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm"
	PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential"
	PARAM_DATE_AMZ_CAMEL = "X-Amz-Date"
	PARAM_DATE_OBS_CAMEL = "X-Obs-Date"
	PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires"
	PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders"
	PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature"

	// Client defaults. Timeouts are in seconds.
	DEFAULT_SIGNATURE = SignatureV2
	DEFAULT_REGION = "region"
	DEFAULT_CONNECT_TIMEOUT = 60
	DEFAULT_SOCKET_TIMEOUT = 60
	DEFAULT_HEADER_TIMEOUT = 60
	DEFAULT_IDLE_CONN_TIMEOUT = 30
	DEFAULT_MAX_RETRY_COUNT = 3
	DEFAULT_MAX_REDIRECT_COUNT = 3
	DEFAULT_MAX_CONN_PER_HOST = 1000
	UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
	// Go reference-time layouts for the date formats used in signing.
	LONG_DATE_FORMAT = "20060102T150405Z"
	SHORT_DATE_FORMAT = "20060102"
	ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z"
	ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z"
	RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT"

	// Signature V4 credential-scope components.
	V4_SERVICE_NAME = "s3"
	V4_SERVICE_SUFFIX = "aws4_request"

	// Authorization-header scheme prefixes.
	V2_HASH_PREFIX = "AWS"
	OBS_HASH_PREFIX = "OBS"

	V4_HASH_PREFIX = "AWS4-HMAC-SHA256"
	V4_HASH_PRE = "AWS4"

	DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms"
	DEFAULT_SSE_KMS_ENCRYPTION_OBS = "kms"

	DEFAULT_SSE_C_ENCRYPTION = "AES256"

	// HTTP method names.
	HTTP_GET = "GET"
	HTTP_POST = "POST"
	HTTP_PUT = "PUT"
	HTTP_DELETE = "DELETE"
	HTTP_HEAD = "HEAD"
	HTTP_OPTIONS = "OPTIONS"

	// Extension-header keys (prefixed by setHeaderPrefix at request time).
	REQUEST_PAYER = "request-payer"
	TRAFFIC_LIMIT = "traffic-limit"
	CALLBACK = "callback"
	MULTI_AZ = "3az"

	// Multipart-upload size limits, in bytes.
	MAX_PART_SIZE = 5 * 1024 * 1024 * 1024
	MIN_PART_SIZE = 100 * 1024
	DEFAULT_PART_SIZE = 9 * 1024 * 1024
	MAX_PART_NUM = 10000

	// Action names used for logging/dispatch.
	GET_OBJECT = "GetObject"
	PUT_OBJECT = "PutObject"
	PUT_FILE = "PutFile"
	APPEND_OBJECT = "AppendObject"
)
|
||||
|
||||
var (
	// interestedHeaders are the standard headers folded into the V2
	// string-to-sign, in this fixed order.
	interestedHeaders = []string{"content-md5", "content-type", "date"}

	// allowedRequestHTTPHeaderMetadataNames lists standard HTTP headers
	// (lowercase) that prepareHeaders passes through unchanged; any other
	// un-prefixed header is dropped or turned into custom metadata.
	allowedRequestHTTPHeaderMetadataNames = map[string]bool{
		"content-type": true,
		"content-md5": true,
		"content-length": true,
		"content-language": true,
		"expires": true,
		"origin": true,
		"cache-control": true,
		"content-disposition": true,
		"content-encoding": true,
		"access-control-request-method": true,
		"access-control-request-headers": true,
		"x-default-storage-class": true,
		"location": true,
		"date": true,
		"etag": true,
		"range": true,
		"host": true,
		"if-modified-since": true,
		"if-unmodified-since": true,
		"if-match": true,
		"if-none-match": true,
		"last-modified": true,
		"content-range": true,
		"accept-encoding": true,
	}

	// allowedLogResponseHTTPHeaderNames lists response headers (lowercase)
	// that are safe to include in debug logs.
	allowedLogResponseHTTPHeaderNames = map[string]bool{
		"content-type": true,
		"etag": true,
		"connection": true,
		"content-length": true,
		"date": true,
		"server": true,
	}

	// allowedResourceParameterNames lists sub-resource query parameters
	// (lowercase) that participate in the canonicalized resource string
	// when signing a request.
	allowedResourceParameterNames = map[string]bool{
		"acl": true,
		"backtosource": true,
		"metadata": true,
		"policy": true,
		"torrent": true,
		"logging": true,
		"location": true,
		"storageinfo": true,
		"quota": true,
		"storageclass": true,
		"storagepolicy": true,
		"requestpayment": true,
		"versions": true,
		"versioning": true,
		"versionid": true,
		"uploads": true,
		"uploadid": true,
		"partnumber": true,
		"website": true,
		"notification": true,
		"lifecycle": true,
		"deletebucket": true,
		"delete": true,
		"cors": true,
		"restore": true,
		"encryption": true,
		"tagging": true,
		"append": true,
		"modify": true,
		"position": true,
		"replication": true,
		"response-content-type": true,
		"response-content-language": true,
		"response-expires": true,
		"response-cache-control": true,
		"response-content-disposition": true,
		"response-content-encoding": true,
		"x-image-process": true,
		"x-oss-process": true,
		"x-image-save-bucket": true,
		"x-image-save-object": true,
		"ignore-sign-in-query": true,
		"name": true,
		"rename": true,
	}
)
|
1244
myhwoss/obs/convert.go
Normal file
1244
myhwoss/obs/convert.go
Normal file
File diff suppressed because it is too large
Load Diff
35
myhwoss/obs/error.go
Normal file
35
myhwoss/obs/error.go
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ObsError defines the error response returned by the OBS service: the HTTP
// status line plus the fields parsed from the XML <Error> payload.
type ObsError struct {
	BaseModel
	// Status is the HTTP status line text of the failed response.
	Status string
	XMLName xml.Name `xml:"Error"`
	// Code is the service-defined error code from the XML body.
	Code string `xml:"Code" json:"code"`
	// Message is the human-readable description from the XML body.
	Message string `xml:"Message" json:"message"`
	Resource string `xml:"Resource"`
	HostId string `xml:"HostId"`
}
|
||||
|
||||
// Error implements the error interface, summarizing the HTTP status, service
// error code, message, and request ID in a single line.
func (err ObsError) Error() string {
	return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s",
		err.Status, err.Code, err.Message, err.RequestId)
}
|
60
myhwoss/obs/extension.go
Normal file
60
myhwoss/obs/extension.go
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// extensionOptions is a marker type for per-request extension values
// (header setters or client configurers).
type extensionOptions interface{}

// extensionHeaders mutates the outgoing request headers; isObs selects the
// native "x-obs-" header dialect over the AWS-compatible "x-amz-" one.
type extensionHeaders func(headers map[string][]string, isObs bool) error
|
||||
|
||||
// WithProgress returns a configurer that installs the given ProgressListener
// on the client configuration so transfers can report progress.
func WithProgress(progressListener ProgressListener) configurer {
	return func(conf *config) {
		conf.progressListener = progressListener
	}
}
|
||||
func setHeaderPrefix(key string, value string) extensionHeaders {
|
||||
return func(headers map[string][]string, isObs bool) error {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return fmt.Errorf("set header %s with empty value", key)
|
||||
}
|
||||
setHeaders(headers, key, []string{value}, isObs)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithReqPaymentHeader sets the requester-pays header so the requester,
// rather than the bucket owner, is billed for the request.
func WithReqPaymentHeader(requester PayerType) extensionHeaders {
	return setHeaderPrefix(REQUEST_PAYER, string(requester))
}
|
||||
|
||||
// WithTrafficLimitHeader sets the traffic-limit header; the limit is sent as
// a decimal string (units per the OBS API — confirm against service docs).
func WithTrafficLimitHeader(trafficLimit int64) extensionHeaders {
	return setHeaderPrefix(TRAFFIC_LIMIT, strconv.FormatInt(trafficLimit, 10))
}
|
||||
|
||||
func WithCallbackHeader(callback string) extensionHeaders {
|
||||
return setHeaderPrefix(CALLBACK, string(callback))
|
||||
}
|
||||
|
||||
func WithCustomHeader(key string, value string) extensionHeaders {
|
||||
return func(headers map[string][]string, isObs bool) error {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return fmt.Errorf("set header %s with empty value", key)
|
||||
}
|
||||
headers[key] = []string{value}
|
||||
return nil
|
||||
}
|
||||
}
|
673
myhwoss/obs/http.go
Normal file
673
myhwoss/obs/http.go
Normal file
@@ -0,0 +1,673 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// prepareHeaders normalizes user-supplied request headers before signing.
// Recognized standard HTTP headers (allowedRequestHTTPHeaderMetadataNames)
// and headers already carrying a vendor prefix are passed through with their
// original casing; anything else is dropped (meta == false) or turned into a
// custom metadata header by prepending "x-amz-meta-"/"x-obs-meta-"
// (meta == true).
func prepareHeaders(headers map[string][]string, meta bool, isObs bool) map[string][]string {
	_headers := make(map[string][]string, len(headers))
	for key, value := range headers {
		key = strings.TrimSpace(key)
		if key == "" {
			continue
		}
		_key := strings.ToLower(key)
		// NOTE(review): the vendor-prefix checks run against the
		// original-case key, so e.g. "X-Amz-Foo" does not match "x-amz-"
		// and would be treated as metadata — confirm this is intended.
		if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; !ok && !strings.HasPrefix(key, HEADER_PREFIX) && !strings.HasPrefix(key, HEADER_PREFIX_OBS) {
			if !meta {
				continue
			}
			if !isObs {
				_key = HEADER_PREFIX_META + _key
			} else {
				_key = HEADER_PREFIX_META_OBS + _key
			}
		} else {
			_key = key
		}
		_headers[_key] = value
	}
	return _headers
}
|
||||
|
||||
func (obsClient ObsClient) checkParamsWithBucketName(bucketName string) bool {
|
||||
return strings.TrimSpace(bucketName) == "" && !obsClient.conf.cname
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) checkParamsWithObjectKey(objectKey string) bool {
|
||||
return strings.TrimSpace(objectKey) == ""
|
||||
}
|
||||
|
||||
// doActionWithoutBucket performs a service-level request (no bucket or object
// in the resource path), expecting an XML result and allowing retries.
func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
	return obsClient.doAction(action, method, "", "", input, output, true, true, extensions, nil)
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, "", input, output, false, true, extensions, nil)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, "", input, output, true, true, extensions, nil)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
if obsClient.checkParamsWithObjectKey(objectKey) {
|
||||
return errors.New("Key is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, true, extensions, nil)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucketAndKeyWithProgress(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions, listener ProgressListener) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
if obsClient.checkParamsWithObjectKey(objectKey) {
|
||||
return errors.New("Key is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, true, extensions, listener)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucketAndKeyV2(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
if obsClient.checkParamsWithObjectKey(objectKey) {
|
||||
return errors.New("Key is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, objectKey, input, output, false, true, extensions, nil)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
if obsClient.checkParamsWithObjectKey(objectKey) {
|
||||
return errors.New("Key is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, false, extensions, nil)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatableWithProgress(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, extensions []extensionOptions, listener ProgressListener) error {
|
||||
if obsClient.checkParamsWithBucketName(bucketName) {
|
||||
return errors.New("Bucket is empty")
|
||||
}
|
||||
if obsClient.checkParamsWithObjectKey(objectKey) {
|
||||
return errors.New("Key is empty")
|
||||
}
|
||||
return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, false, extensions, listener)
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool, extensions []extensionOptions, listener ProgressListener) error {
|
||||
|
||||
var resp *http.Response
|
||||
var respError error
|
||||
doLog(LEVEL_INFO, "Enter method %s...", action)
|
||||
start := GetCurrentTimestamp()
|
||||
isObs := obsClient.conf.signature == SignatureObs
|
||||
|
||||
params, headers, data, err := input.trans(isObs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if params == nil {
|
||||
params = make(map[string]string)
|
||||
}
|
||||
|
||||
if headers == nil {
|
||||
headers = make(map[string][]string)
|
||||
}
|
||||
|
||||
for _, extension := range extensions {
|
||||
if extensionHeader, ok := extension.(extensionHeaders); ok {
|
||||
if _err := extensionHeader(headers, isObs); err != nil {
|
||||
doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", _err))
|
||||
}
|
||||
} else {
|
||||
doLog(LEVEL_INFO, "Unsupported extensionOptions")
|
||||
}
|
||||
}
|
||||
|
||||
resp, respError = obsClient.doHTTPRequest(method, bucketName, objectKey, params, headers, data, repeatable, listener)
|
||||
|
||||
if respError == nil && output != nil {
|
||||
respError = HandleHttpResponse(action, headers, output, resp, xmlResult, isObs)
|
||||
} else {
|
||||
doLog(LEVEL_WARN, "Do http request with error: %v", respError)
|
||||
}
|
||||
|
||||
if isDebugLogEnabled() {
|
||||
doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
|
||||
}
|
||||
|
||||
return respError
|
||||
}
|
||||
|
||||
// doHTTPRequest normalizes the headers (non-metadata mode) for the client's
// signature dialect and delegates the actual transfer to doHTTP.
func (obsClient ObsClient) doHTTPRequest(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool, listener ProgressListener) (*http.Response, error) {
	return obsClient.doHTTP(method, bucketName, objectKey, params, prepareHeaders(headers, false, obsClient.conf.signature == SignatureObs), data, repeatable, listener)
}
|
||||
|
||||
func prepareAgentHeader(clientUserAgent string) string {
|
||||
userAgent := USER_AGENT
|
||||
if clientUserAgent != "" {
|
||||
userAgent = clientUserAgent
|
||||
}
|
||||
return userAgent
|
||||
}
|
||||
|
||||
// getSignedURLResponse converts the raw HTTP outcome of a signed-URL request
// into the SDK result: transport errors pass through, status >= 300 becomes
// an ObsError, and successful responses are parsed into output (when
// non-nil). resp is nilled out on every error path so callers cannot reuse
// a consumed/failed response.
func (obsClient ObsClient) getSignedURLResponse(action string, output IBaseModel, xmlResult bool, resp *http.Response, err error, start int64) (respError error) {
	var msg interface{}
	if err != nil {
		respError = err
		resp = nil
	} else {
		doLog(LEVEL_DEBUG, "Response headers: %s", logResponseHeader(resp.Header))
		if resp.StatusCode >= 300 {
			respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
			// msg is only set here, so the "Failed to send request" error log
			// below fires for HTTP-level failures, not transport errors.
			msg = resp.Status
			resp = nil
		} else {
			if output != nil {
				respError = ParseResponseToBaseModel(resp, output, xmlResult, obsClient.conf.signature == SignatureObs)
			}
			if respError != nil {
				doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
			}
		}
	}

	if msg != nil {
		doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
	}

	if isDebugLogEnabled() {
		doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
	}
	return
}
|
||||
|
||||
// doHTTPWithSignedURL executes a request against a presigned URL using the
// exact headers that were covered by the signature, masking any security
// token embedded in the URL before logging, and hands the outcome to
// getSignedURLResponse.
func (obsClient ObsClient) doHTTPWithSignedURL(action, method string, signedURL string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
	req, err := http.NewRequest(method, signedURL, data)
	if err != nil {
		return err
	}
	if obsClient.conf.ctx != nil {
		req = req.WithContext(obsClient.conf.ctx)
	}
	var resp *http.Response

	// Scan the query string for a security token so it can be masked in logs.
	var isSecurityToken bool
	var securityToken string
	var query []string
	parmas := strings.Split(signedURL, "?")
	if len(parmas) > 1 {
		query = strings.Split(parmas[1], "&")
		for _, value := range query {
			if strings.HasPrefix(value, HEADER_STS_TOKEN_AMZ+"=") || strings.HasPrefix(value, HEADER_STS_TOKEN_OBS+"=") {
				// NOTE(review): slicing uses len(HEADER_STS_TOKEN_AMZ) for
				// both branches; this only works because the AMZ and OBS
				// token header names happen to be the same length.
				if value[len(HEADER_STS_TOKEN_AMZ)+1:] != "" {
					securityToken = value[len(HEADER_STS_TOKEN_AMZ)+1:]
					isSecurityToken = true
				}
			}
		}
	}
	logSignedURL := signedURL
	if isSecurityToken {
		logSignedURL = strings.Replace(logSignedURL, securityToken, "******", -1)
	}
	doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, logSignedURL)

	// Host and Content-Length are request fields in net/http, not plain
	// headers, so lift them out of the header map.
	req.Header = actualSignedRequestHeaders
	if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
		req.Host = value[0]
		delete(req.Header, HEADER_HOST_CAMEL)
	} else if value, ok := req.Header[HEADER_HOST]; ok {
		req.Host = value[0]
		delete(req.Header, HEADER_HOST)
	}

	if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
		req.ContentLength = StringToInt64(value[0], -1)
		delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL)
	} else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok {
		req.ContentLength = StringToInt64(value[0], -1)
		delete(req.Header, HEADER_CONTENT_LENGTH)
	}

	userAgent := prepareAgentHeader(obsClient.conf.userAgent)
	req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent}
	start := GetCurrentTimestamp()
	resp, err = obsClient.httpClient.Do(req)
	if isInfoLogEnabled() {
		doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
	}

	respError = obsClient.getSignedURLResponse(action, output, xmlResult, resp, err, start)

	return
}
|
||||
|
||||
func prepareData(headers map[string][]string, data interface{}) (io.Reader, error) {
|
||||
var _data io.Reader
|
||||
if data != nil {
|
||||
if dataStr, ok := data.(string); ok {
|
||||
doLog(LEVEL_DEBUG, "Do http request with string")
|
||||
headers["Content-Length"] = []string{IntToString(len(dataStr))}
|
||||
_data = strings.NewReader(dataStr)
|
||||
} else if dataByte, ok := data.([]byte); ok {
|
||||
doLog(LEVEL_DEBUG, "Do http request with byte array")
|
||||
headers["Content-Length"] = []string{IntToString(len(dataByte))}
|
||||
_data = bytes.NewReader(dataByte)
|
||||
} else if dataReader, ok := data.(io.Reader); ok {
|
||||
_data = dataReader
|
||||
} else {
|
||||
doLog(LEVEL_WARN, "Data is not a valid io.Reader")
|
||||
return nil, errors.New("Data is not a valid io.Reader")
|
||||
}
|
||||
}
|
||||
return _data, nil
|
||||
}
|
||||
|
||||
func (obsClient ObsClient) getRequest(redirectURL, requestURL string, redirectFlag bool, _data io.Reader, method,
|
||||
bucketName, objectKey string, params map[string]string, headers map[string][]string) (*http.Request, error) {
|
||||
if redirectURL != "" {
|
||||
if !redirectFlag {
|
||||
parsedRedirectURL, err := url.Parse(redirectURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectURL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if parsedRequestURL, err := url.Parse(requestURL); err != nil {
|
||||
return nil, err
|
||||
} else if parsedRequestURL.RawQuery != "" && parsedRedirectURL.RawQuery == "" {
|
||||
redirectURL += "?" + parsedRequestURL.RawQuery
|
||||
}
|
||||
}
|
||||
requestURL = redirectURL
|
||||
} else {
|
||||
var err error
|
||||
requestURL, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, requestURL, _data)
|
||||
if obsClient.conf.ctx != nil {
|
||||
req = req.WithContext(obsClient.conf.ctx)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestURL, method)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// logHeaders debug-logs the request headers with credentials masked: the
// Authorization header is removed for the log call and any security-token
// header is replaced by "******"; both are restored afterwards so the map
// can still be used for the actual request.
func logHeaders(headers map[string][]string, signature SignatureType) {
	if isDebugLogEnabled() {
		auth := headers[HEADER_AUTH_CAMEL]
		delete(headers, HEADER_AUTH_CAMEL)

		var isSecurityToken bool
		var securityToken []string
		if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_AMZ]; isSecurityToken {
			headers[HEADER_STS_TOKEN_AMZ] = []string{"******"}
		} else if securityToken, isSecurityToken = headers[HEADER_STS_TOKEN_OBS]; isSecurityToken {
			headers[HEADER_STS_TOKEN_OBS] = []string{"******"}
		}
		doLog(LEVEL_DEBUG, "Request headers: %s", logRequestHeader(headers))
		// NOTE(review): if no Authorization header was present, this re-adds
		// the key with a nil slice value — confirm downstream code tolerates
		// that entry.
		headers[HEADER_AUTH_CAMEL] = auth
		if isSecurityToken {
			if signature == SignatureObs {
				headers[HEADER_STS_TOKEN_OBS] = securityToken
			} else {
				headers[HEADER_STS_TOKEN_AMZ] = securityToken
			}
		}
	}
}
|
||||
|
||||
// prepareReq copies the prepared headers onto req (lifting Host and
// Content-Length into their dedicated request fields), stamps the
// User-Agent, and returns the request to remember as "last" for retries.
func prepareReq(headers map[string][]string, req, lastRequest *http.Request, clientUserAgent string) *http.Request {
	for key, value := range headers {
		if key == HEADER_HOST_CAMEL {
			req.Host = value[0]
			delete(headers, key)
		} else if key == HEADER_CONTENT_LENGTH_CAMEL {
			req.ContentLength = StringToInt64(value[0], -1)
			delete(headers, key)
		} else {
			req.Header[key] = value
		}
	}

	// NOTE(review): lastRequest is overwritten with req here, which makes
	// the nil check and the Host/ContentLength copies below self-referential
	// no-ops. The intent was presumably to carry Host/ContentLength over
	// from the previous attempt's request — confirm against the callers
	// before changing, as all call sites currently rely on this behavior.
	lastRequest = req

	userAgent := prepareAgentHeader(clientUserAgent)
	req.Header[HEADER_USER_AGENT_CAMEL] = []string{userAgent}

	if lastRequest != nil {
		req.Host = lastRequest.Host
		req.ContentLength = lastRequest.ContentLength
	}
	return lastRequest
}
|
||||
|
||||
// canNotRetry reports whether a failed attempt must not be retried: the
// request is marked non-repeatable, the status is a client error (4xx),
// or the response was 304 Not Modified.
func canNotRetry(repeatable bool, statusCode int) bool {
	return !repeatable || statusCode == 304 || (statusCode >= 400 && statusCode < 500)
}
|
||||
|
||||
// isRedirectErr reports whether the response should be followed as a
// redirect: a Location is present and the redirect budget is not exhausted.
func isRedirectErr(location string, redirectCount, maxRedirectCount int) bool {
	return location != "" && redirectCount < maxRedirectCount
}
|
||||
|
||||
func setRedirectFlag(statusCode int, method string) (redirectFlag bool) {
|
||||
if statusCode == 302 && method == HTTP_GET {
|
||||
redirectFlag = true
|
||||
} else {
|
||||
redirectFlag = false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// prepareRetry resets all per-attempt state before a request is retried:
// it closes the previous response body, re-stamps the Date headers, drops
// the stale Authorization header (the request must be re-signed), and
// rewinds the body reader so the payload can be re-sent from the start.
// It returns the (possibly replaced) body reader and a nil response.
func prepareRetry(resp *http.Response, headers map[string][]string, _data io.Reader, msg interface{}) (io.Reader, *http.Response, error) {
	// Release the failed attempt's response so its connection can be reused.
	if resp != nil {
		_err := resp.Body.Close()
		checkAndLogErr(_err, LEVEL_WARN, "Failed to close resp body")
		resp = nil
	}

	// Refresh the request date so the new signature stays within the
	// server's clock-skew window.
	if _, ok := headers[HEADER_DATE_CAMEL]; ok {
		headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
	}

	if _, ok := headers[HEADER_DATE_AMZ]; ok {
		headers[HEADER_DATE_AMZ] = []string{FormatUtcToRfc1123(time.Now().UTC())}
	}

	// The old signature no longer matches the refreshed date; drop it so
	// the retry is re-signed.
	if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
		delete(headers, HEADER_AUTH_CAMEL)
	}
	doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
	// Rewind the body so the retry transmits the full payload again. The
	// concrete reader type determines how (and whether) that is possible.
	if r, ok := _data.(*strings.Reader); ok {
		_, err := r.Seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
	} else if r, ok := _data.(*bytes.Reader); ok {
		_, err := r.Seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
	} else if r, ok := _data.(*fileReaderWrapper); ok {
		// File-backed body: reopen the file and seek back to the recorded
		// mark, handing out a fresh wrapper for this attempt.
		fd, err := os.Open(r.filePath)
		if err != nil {
			return nil, nil, err
		}
		fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath}
		fileReaderWrapper.mark = r.mark
		fileReaderWrapper.reader = fd
		fileReaderWrapper.totalCount = r.totalCount
		_data = fileReaderWrapper
		_, err = fd.Seek(r.mark, 0)
		if err != nil {
			// Seek failed: close the file we just opened before bailing out.
			errMsg := fd.Close()
			checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg)
			return nil, nil, err
		}
	} else if r, ok := _data.(*readerWrapper); ok {
		_, err := r.seek(0, 0)
		if err != nil {
			return nil, nil, err
		}
		// Reset the progress counter so the retry is accounted from zero.
		r.readedCount = 0
	}
	return _data, resp, nil
}
|
||||
|
||||
// handleBody handles request body
|
||||
func handleBody(req *http.Request, body io.Reader, listener ProgressListener, tracker *readerTracker) {
|
||||
reader := body
|
||||
readerLen, err := GetReaderLen(reader)
|
||||
if err == nil {
|
||||
req.ContentLength = readerLen
|
||||
}
|
||||
if req.ContentLength > 0 {
|
||||
req.Header.Set(HEADER_CONTENT_LENGTH_CAMEL, strconv.FormatInt(req.ContentLength, 10))
|
||||
}
|
||||
|
||||
if reader != nil {
|
||||
reader = TeeReader(reader, req.ContentLength, listener, tracker)
|
||||
}
|
||||
|
||||
// HTTP body
|
||||
rc, ok := reader.(io.ReadCloser)
|
||||
if !ok && reader != nil {
|
||||
rc = ioutil.NopCloser(reader)
|
||||
}
|
||||
|
||||
req.Body = rc
|
||||
}
|
||||
|
||||
// doHTTP builds, signs and sends one OBS request, retrying transient
// failures (with jittered backoff) and following redirects up to the
// configured limits. On success it returns the raw *http.Response; on
// failure respError carries either the transport error or the parsed OBS
// error. Progress events are published to listener throughout.
func (obsClient ObsClient) doHTTP(method, bucketName, objectKey string, params map[string]string,
	headers map[string][]string, data interface{}, repeatable bool, listener ProgressListener) (resp *http.Response, respError error) {

	bucketName = strings.TrimSpace(bucketName)

	method = strings.ToUpper(method)

	var redirectURL string
	var requestURL string
	maxRetryCount := obsClient.conf.maxRetryCount
	maxRedirectCount := obsClient.conf.maxRedirectCount

	_data, _err := prepareData(headers, data)
	if _err != nil {
		return nil, _err
	}

	var lastRequest *http.Request
	redirectFlag := false

	tracker := &readerTracker{completedBytes: 0}

	// i counts total attempts; redirectCount only redirects. A followed
	// redirect bumps maxRetryCount so it doesn't consume a retry slot.
	for i, redirectCount := 0, 0; i <= maxRetryCount; i++ {
		req, err := obsClient.getRequest(redirectURL, requestURL, redirectFlag, _data,
			method, bucketName, objectKey, params, headers)
		if err != nil {
			return nil, err
		}

		handleBody(req, _data, listener, tracker)

		logHeaders(headers, obsClient.conf.signature)

		lastRequest = prepareReq(headers, req, lastRequest, obsClient.conf.userAgent)

		// Transfer started
		event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
		publishProgress(listener, event)

		start := GetCurrentTimestamp()
		resp, err = obsClient.httpClient.Do(req)
		doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))

		// msg records why this attempt failed, for the retry warning log.
		var msg interface{}
		if err != nil {
			// Transport-level failure: retry only if the body is repeatable.
			msg = err
			respError = err
			resp = nil
			if !repeatable {
				break
			}
		} else {
			doLog(LEVEL_DEBUG, "Response headers: %s", logResponseHeader(resp.Header))
			if resp.StatusCode < 300 {
				// Success: hand the open response back to the caller.
				respError = nil
				break
			} else if canNotRetry(repeatable, resp.StatusCode) {
				event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
				publishProgress(listener, event)

				respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
				resp = nil
				break
			} else if resp.StatusCode >= 300 && resp.StatusCode < 400 {
				// Redirect: follow Location if the redirect budget allows,
				// otherwise surface the response as an OBS error.
				location := resp.Header.Get(HEADER_LOCATION_CAMEL)
				if isRedirectErr(location, redirectCount, maxRedirectCount) {
					redirectURL = location
					doLog(LEVEL_WARN, "Redirect request to %s", redirectURL)
					msg = resp.Status
					maxRetryCount++
					redirectCount++
					redirectFlag = setRedirectFlag(resp.StatusCode, method)
				} else {
					respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
					resp = nil
					break
				}
			} else {
				// 5xx etc.: fall through to the retry preparation below.
				msg = resp.Status
			}
		}
		if i != maxRetryCount {
			// More attempts remain: reset headers/body for the next try.
			_data, resp, err = prepareRetry(resp, headers, _data, msg)
			if err != nil {
				return nil, err
			}
			if r, ok := _data.(*fileReaderWrapper); ok {
				if _fd, _ok := r.reader.(*os.File); _ok {
					// Deferred so the reopened file outlives the retry that
					// streams from it; all close at doHTTP return.
					defer func() {
						errMsg := _fd.Close()
						checkAndLogErr(errMsg, LEVEL_WARN, "Failed to close with reason: %v", errMsg)
					}()
				}
			}
			// Jittered backoff that grows with the attempt number.
			time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second)))
		} else {
			// Budget exhausted: report the last failure.
			doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
			if resp != nil {
				respError = ParseResponseToObsError(resp, obsClient.conf.signature == SignatureObs)
				resp = nil
			}
			event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
			publishProgress(listener, event)
		}
	}
	return
}
|
||||
|
||||
// connDelegate wraps a net.Conn and enforces two deadlines on every I/O
// operation: socketTimeout bounds the operation itself, and finalTimeout is
// re-armed after each operation to bound subsequent idleness.
type connDelegate struct {
	conn          net.Conn      // the wrapped connection
	socketTimeout time.Duration // deadline applied before each Read/Write
	finalTimeout  time.Duration // deadline re-armed after each Read/Write
}
|
||||
|
||||
func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate {
|
||||
return &connDelegate{
|
||||
conn: conn,
|
||||
socketTimeout: time.Second * time.Duration(socketTimeout),
|
||||
finalTimeout: time.Second * time.Duration(finalTimeout),
|
||||
}
|
||||
}
|
||||
|
||||
func (delegate *connDelegate) Read(b []byte) (n int, err error) {
|
||||
setReadDeadlineErr := delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout))
|
||||
flag := isDebugLogEnabled()
|
||||
|
||||
if setReadDeadlineErr != nil && flag {
|
||||
doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
|
||||
}
|
||||
|
||||
n, err = delegate.conn.Read(b)
|
||||
setReadDeadlineErr = delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout))
|
||||
if setReadDeadlineErr != nil && flag {
|
||||
doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Write writes to the underlying connection with the per-operation socket
// timeout armed, then pushes both the write AND read deadlines out to the
// final timeout once the write completes. Deadline-set failures are only
// logged at debug level; they never abort the write.
func (delegate *connDelegate) Write(b []byte) (n int, err error) {
	setWriteDeadlineErr := delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout))
	flag := isDebugLogEnabled()
	if setWriteDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
	}

	n, err = delegate.conn.Write(b)
	// Re-arm both directions so the connection may idle until finalTimeout.
	finalTimeout := time.Now().Add(delegate.finalTimeout)
	setWriteDeadlineErr = delegate.SetWriteDeadline(finalTimeout)
	if setWriteDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set write deadline with reason: %v, but it's ok", setWriteDeadlineErr)
	}
	setReadDeadlineErr := delegate.SetReadDeadline(finalTimeout)
	if setReadDeadlineErr != nil && flag {
		doLog(LEVEL_DEBUG, "Failed to set read deadline with reason: %v, but it's ok", setReadDeadlineErr)
	}
	return n, err
}
|
||||
|
||||
// Close closes the underlying connection.
func (delegate *connDelegate) Close() error {
	return delegate.conn.Close()
}

// LocalAddr returns the local address of the underlying connection.
func (delegate *connDelegate) LocalAddr() net.Addr {
	return delegate.conn.LocalAddr()
}

// RemoteAddr returns the remote address of the underlying connection.
func (delegate *connDelegate) RemoteAddr() net.Addr {
	return delegate.conn.RemoteAddr()
}

// SetDeadline sets both read and write deadlines on the underlying connection.
func (delegate *connDelegate) SetDeadline(t time.Time) error {
	return delegate.conn.SetDeadline(t)
}

// SetReadDeadline sets the read deadline on the underlying connection.
func (delegate *connDelegate) SetReadDeadline(t time.Time) error {
	return delegate.conn.SetReadDeadline(t)
}

// SetWriteDeadline sets the write deadline on the underlying connection.
func (delegate *connDelegate) SetWriteDeadline(t time.Time) error {
	return delegate.conn.SetWriteDeadline(t)
}
|
376
myhwoss/obs/log.go
Normal file
376
myhwoss/obs/log.go
Normal file
@@ -0,0 +1,376 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Level defines the level of the log
type Level int

// Log levels, ordered by severity: a message is emitted when its level is
// >= the configured logConf.level, so LEVEL_OFF suppresses everything.
const (
	// LEVEL_OFF disables all logging.
	LEVEL_OFF Level = 500
	// LEVEL_ERROR logs only errors.
	LEVEL_ERROR Level = 400
	// LEVEL_WARN logs warnings and errors.
	LEVEL_WARN Level = 300
	// LEVEL_INFO logs informational messages and above.
	LEVEL_INFO Level = 200
	// LEVEL_DEBUG logs everything.
	LEVEL_DEBUG Level = 100
)

// logLevelMap maps each level to the prefix written before its messages.
var logLevelMap = map[Level]string{
	LEVEL_OFF:   "[OFF]: ",
	LEVEL_ERROR: "[ERROR]: ",
	LEVEL_WARN:  "[WARN]: ",
	LEVEL_INFO:  "[INFO]: ",
	LEVEL_DEBUG: "[DEBUG]: ",
}
|
||||
|
||||
// logConfType holds the package-wide logging configuration.
type logConfType struct {
	level        Level  // minimum level that is emitted
	logToConsole bool   // also mirror messages to stdout
	logFullPath  string // absolute path of the log file ("" = no file logging)
	maxLogSize   int64  // rotate the file once it reaches this many bytes
	backups      int    // number of rotated files kept before indices wrap
}
|
||||
|
||||
func getDefaultLogConf() logConfType {
|
||||
return logConfType{
|
||||
level: LEVEL_WARN,
|
||||
logToConsole: false,
|
||||
logFullPath: "",
|
||||
maxLogSize: 1024 * 1024 * 30, //30MB
|
||||
backups: 10,
|
||||
}
|
||||
}
|
||||
|
||||
// logConf is the active logging configuration; guarded by `lock`.
var logConf logConfType
|
||||
|
||||
// loggerWrapper is an asynchronous, rotating file logger: Printf pushes
// messages onto ch, and a background goroutine (doWrite) batches them in
// queue and flushes to fd.
type loggerWrapper struct {
	fullPath   string         // absolute path of the active log file
	fd         *os.File       // open handle to the active log file
	ch         chan string    // message channel feeding the writer goroutine
	wg         sync.WaitGroup // waits for the writer goroutine on close
	queue      []string       // messages buffered since the last flush
	logger     *log.Logger    // writes the buffered messages to fd
	index      int            // next rotation suffix (file.log.N)
	cacheCount int            // flush threshold / channel capacity
	closed     bool           // set by doClose; Printf drops messages after
}
|
||||
|
||||
// doInit allocates the message buffer and channel and starts the background
// writer goroutine; wg is incremented first so doClose can wait for it.
func (lw *loggerWrapper) doInit() {
	lw.queue = make([]string, 0, lw.cacheCount)
	lw.logger = log.New(lw.fd, "", 0)
	lw.ch = make(chan string, lw.cacheCount)
	lw.wg.Add(1)
	go lw.doWrite()
}
|
||||
|
||||
func (lw *loggerWrapper) rotate() {
|
||||
stat, err := lw.fd.Stat()
|
||||
if err != nil {
|
||||
_err := lw.fd.Close()
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
if stat.Size() >= logConf.maxLogSize {
|
||||
_err := lw.fd.Sync()
|
||||
if _err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_err = lw.fd.Close()
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
|
||||
}
|
||||
if lw.index > logConf.backups {
|
||||
lw.index = 1
|
||||
}
|
||||
_err = os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
|
||||
if _err != nil {
|
||||
panic(err)
|
||||
}
|
||||
lw.index++
|
||||
|
||||
fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
lw.fd = fd
|
||||
lw.logger.SetOutput(lw.fd)
|
||||
}
|
||||
}
|
||||
|
||||
// doFlush rotates the file if needed, writes every queued message to it,
// and syncs to disk; a sync failure panics (this logger's error policy).
func (lw *loggerWrapper) doFlush() {
	lw.rotate()
	for _, m := range lw.queue {
		lw.logger.Println(m)
	}
	err := lw.fd.Sync()
	if err != nil {
		panic(err)
	}
}
|
||||
|
||||
// doClose marks the logger closed, closes the channel to stop the writer
// goroutine (which flushes and closes the file), and waits for it to exit.
func (lw *loggerWrapper) doClose() {
	lw.closed = true
	close(lw.ch)
	lw.wg.Wait()
}
|
||||
|
||||
// doWrite is the background writer goroutine: it drains lw.ch, buffering
// messages in lw.queue and flushing to disk once cacheCount messages have
// accumulated. When the channel is closed (by doClose) it performs a final
// flush, closes the file, and exits.
func (lw *loggerWrapper) doWrite() {
	defer lw.wg.Done()
	for {
		msg, ok := <-lw.ch
		if !ok {
			// Channel closed: flush what is buffered and shut down.
			lw.doFlush()
			_err := lw.fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			break
		}
		// Flush BEFORE appending, so the buffer never exceeds cacheCount.
		if len(lw.queue) >= lw.cacheCount {
			lw.doFlush()
			lw.queue = make([]string, 0, lw.cacheCount)
		}
		lw.queue = append(lw.queue, msg)
	}

}
|
||||
|
||||
// Printf formats the message and queues it for the writer goroutine.
// NOTE(review): the closed check and the channel send are not atomic, so
// doClose may close ch in between; doLog's deferred recover() appears to be
// what absorbs the resulting send-on-closed-channel panic — confirm all
// callers reach this via doLog.
func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
	if !lw.closed {
		msg := fmt.Sprintf(format, v...)
		lw.ch <- msg
	}
}
|
||||
|
||||
// consoleLogger mirrors messages to stdout when enabled (nil = disabled).
var consoleLogger *log.Logger

// fileLogger writes messages to the rotating log file (nil = disabled).
var fileLogger *loggerWrapper

// lock guards (re)configuration of the loggers above and logConf.
var lock = new(sync.RWMutex)
||||
|
||||
// isDebugLogEnabled reports whether DEBUG-level messages would be emitted.
func isDebugLogEnabled() bool {
	return logConf.level <= LEVEL_DEBUG
}

// isErrorLogEnabled reports whether ERROR-level messages would be emitted.
func isErrorLogEnabled() bool {
	return logConf.level <= LEVEL_ERROR
}

// isWarnLogEnabled reports whether WARN-level messages would be emitted.
func isWarnLogEnabled() bool {
	return logConf.level <= LEVEL_WARN
}

// isInfoLogEnabled reports whether INFO-level messages would be emitted.
func isInfoLogEnabled() bool {
	return logConf.level <= LEVEL_INFO
}
|
||||
|
||||
// reset shuts down the file logger (flushing pending messages), drops the
// console logger, and restores the default configuration. Callers must
// hold `lock`.
func reset() {
	if fileLogger != nil {
		fileLogger.doClose()
		fileLogger = nil
	}
	consoleLogger = nil
	logConf = getDefaultLogConf()
}
|
||||
|
||||
// InitLog enable logging function with default cacheCnt
// (50 messages buffered before each flush).
func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error {
	return InitLogWithCacheCnt(logFullPath, maxLogSize, backups, level, logToConsole, 50)
}
|
||||
|
||||
// InitLogWithCacheCnt enable logging function. It resets any previous
// logger, then (if logFullPath is non-empty) opens/creates the ".log" file,
// scans the directory for existing "name.N" backups to resume the rotation
// index, and starts the asynchronous file logger. maxLogSize/backups
// override the defaults only when positive; logToConsole additionally
// mirrors messages to stdout. cacheCnt is the flush batch size.
func InitLogWithCacheCnt(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool, cacheCnt int) error {
	lock.Lock()
	defer lock.Unlock()
	if cacheCnt <= 0 {
		cacheCnt = 50
	}
	reset()
	if fullPath := strings.TrimSpace(logFullPath); fullPath != "" {
		_fullPath, err := filepath.Abs(fullPath)
		if err != nil {
			return err
		}

		if !strings.HasSuffix(_fullPath, ".log") {
			_fullPath += ".log"
		}

		stat, fd, err := initLogFile(_fullPath)
		if err != nil {
			return err
		}

		// Resume rotation after the newest existing "name.N" backup so a
		// restart does not overwrite earlier rotated files.
		prefix := stat.Name() + "."
		index := 1
		var timeIndex int64 = 0
		walkFunc := func(path string, info os.FileInfo, err error) error {
			if err == nil {
				if name := info.Name(); strings.HasPrefix(name, prefix) {
					if i := StringToInt(name[len(prefix):], 0); i >= index && info.ModTime().Unix() >= timeIndex {
						timeIndex = info.ModTime().Unix()
						index = i + 1
					}
				}
			}
			return err
		}

		if err = filepath.Walk(filepath.Dir(_fullPath), walkFunc); err != nil {
			_err := fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			return err
		}

		fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: index, cacheCount: cacheCnt, closed: false}
		fileLogger.doInit()
	}
	if maxLogSize > 0 {
		logConf.maxLogSize = maxLogSize
	}
	if backups > 0 {
		logConf.backups = backups
	}
	logConf.level = level
	if logToConsole {
		consoleLogger = log.New(os.Stdout, "", log.LstdFlags)
	}
	return nil
}
|
||||
|
||||
// initLogFile validates _fullPath (it must not be an existing directory),
// creates the parent directories, and opens the log file for appending.
// It returns the file's stat info (taken after creation when the file was
// new) together with the open handle.
func initLogFile(_fullPath string) (os.FileInfo, *os.File, error) {
	stat, err := os.Stat(_fullPath)
	if err == nil && stat.IsDir() {
		return nil, nil, fmt.Errorf("logFullPath:[%s] is a directory", _fullPath)
	} else if err = os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil {
		return nil, nil, err
	}

	fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		// NOTE(review): on OpenFile failure fd is nil; (*os.File).Close on a
		// nil receiver just returns ErrInvalid, so this branch is harmless
		// but the Close is effectively a no-op.
		_err := fd.Close()
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
		}
		return nil, nil, err
	}

	// stat is nil when the file did not exist before; stat it now that
	// OpenFile has created it.
	if stat == nil {
		stat, err = os.Stat(_fullPath)
		if err != nil {
			_err := fd.Close()
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to close file with reason: %v", _err)
			}
			return nil, nil, err
		}
	}

	return stat, fd, nil
}
|
||||
|
||||
// CloseLog disable logging and synchronize cache data to log files.
// NOTE(review): logEnabled() is checked before the lock is taken, so a
// concurrent re-init could race this check — confirm callers serialize
// init/close.
func CloseLog() {
	if logEnabled() {
		lock.Lock()
		defer lock.Unlock()
		reset()
	}
}
|
||||
|
||||
// logEnabled reports whether any log sink (console or file) is configured.
func logEnabled() bool {
	return consoleLogger != nil || fileLogger != nil
}
|
||||
|
||||
// DoLog writes log messages to the logger. It is the exported entry point
// and simply forwards to the internal doLog.
func DoLog(level Level, format string, v ...interface{}) {
	doLog(level, format, v...)
}
|
||||
|
||||
// doLog formats a message, prefixes it with its caller's file:line and the
// level tag, and writes it to the configured sinks. Messages below the
// configured level (or with no sink configured) are dropped.
func doLog(level Level, format string, v ...interface{}) {
	if logEnabled() && logConf.level <= level {
		msg := fmt.Sprintf(format, v...)
		// Annotate with the immediate caller's source location.
		if _, file, line, ok := runtime.Caller(1); ok {
			index := strings.LastIndex(file, "/")
			if index >= 0 {
				file = file[index+1:]
			}
			msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
		}
		prefix := logLevelMap[level]
		// Absorb the panic from sending on a channel that doClose closed
		// concurrently (see loggerWrapper.Printf).
		defer func() {
			_ = recover()
			// ignore ch closed error
		}()
		if consoleLogger != nil {
			consoleLogger.Printf("%s%s", prefix, msg)
		}
		if fileLogger != nil {
			nowDate := FormatUtcNow("2006-01-02T15:04:05Z")
			fileLogger.Printf("%s %s%s", nowDate, prefix, msg)
		}
	}
}
|
||||
|
||||
// checkAndLogErr logs the formatted message at the given level, but only
// when err is non-nil; a nil err is a silent no-op.
func checkAndLogErr(err error, level Level, format string, v ...interface{}) {
	if err != nil {
		doLog(level, format, v...)
	}
}
|
||||
|
||||
// logResponseHeader renders the allow-listed response headers (plus the
// request-id header) as a single "Key: [value]" space-joined string for
// debug logging.
func logResponseHeader(respHeader http.Header) string {
	resp := make([]string, 0, len(respHeader)+1)
	for key, value := range respHeader {
		key = strings.TrimSpace(key)
		if key == "" {
			continue
		}
		// NOTE(review): the strip always removes len(HEADER_PREFIX) bytes,
		// even when the match was HEADER_PREFIX_OBS — correct only while
		// both prefixes have the same length; confirm if either changes.
		if strings.HasPrefix(key, HEADER_PREFIX) || strings.HasPrefix(key, HEADER_PREFIX_OBS) {
			key = key[len(HEADER_PREFIX):]
		}
		_key := strings.ToLower(key)
		if _, ok := allowedLogResponseHTTPHeaderNames[_key]; ok {
			resp = append(resp, fmt.Sprintf("%s: [%s]", key, value[0]))
		}
		// The request id is always logged, even if not in the allow-list.
		if _key == HEADER_REQUEST_ID {
			resp = append(resp, fmt.Sprintf("%s: [%s]", key, value[0]))
		}
	}
	return strings.Join(resp, " ")
}
|
||||
|
||||
func logRequestHeader(reqHeader http.Header) string {
|
||||
resp := make([]string, 0, len(reqHeader)+1)
|
||||
for key, value := range reqHeader {
|
||||
key = strings.TrimSpace(key)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
_key := strings.ToLower(key)
|
||||
if _, ok := allowedRequestHTTPHeaderMetadataNames[_key]; ok {
|
||||
resp = append(resp, fmt.Sprintf("%s: [%s]", key, value[0]))
|
||||
}
|
||||
}
|
||||
return strings.Join(resp, " ")
|
||||
}
|
403
myhwoss/obs/mime.go
Normal file
403
myhwoss/obs/mime.go
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
var mimeTypes = map[string]string{
|
||||
"001": "application/x-001",
|
||||
"301": "application/x-301",
|
||||
"323": "text/h323",
|
||||
"7z": "application/x-7z-compressed",
|
||||
"906": "application/x-906",
|
||||
"907": "drawing/907",
|
||||
"IVF": "video/x-ivf",
|
||||
"a11": "application/x-a11",
|
||||
"aac": "audio/x-aac",
|
||||
"acp": "audio/x-mei-aac",
|
||||
"ai": "application/postscript",
|
||||
"aif": "audio/aiff",
|
||||
"aifc": "audio/aiff",
|
||||
"aiff": "audio/aiff",
|
||||
"anv": "application/x-anv",
|
||||
"apk": "application/vnd.android.package-archive",
|
||||
"asa": "text/asa",
|
||||
"asf": "video/x-ms-asf",
|
||||
"asp": "text/asp",
|
||||
"asx": "video/x-ms-asf",
|
||||
"atom": "application/atom+xml",
|
||||
"au": "audio/basic",
|
||||
"avi": "video/avi",
|
||||
"awf": "application/vnd.adobe.workflow",
|
||||
"biz": "text/xml",
|
||||
"bmp": "application/x-bmp",
|
||||
"bot": "application/x-bot",
|
||||
"bz2": "application/x-bzip2",
|
||||
"c4t": "application/x-c4t",
|
||||
"c90": "application/x-c90",
|
||||
"cal": "application/x-cals",
|
||||
"cat": "application/vnd.ms-pki.seccat",
|
||||
"cdf": "application/x-netcdf",
|
||||
"cdr": "application/x-cdr",
|
||||
"cel": "application/x-cel",
|
||||
"cer": "application/x-x509-ca-cert",
|
||||
"cg4": "application/x-g4",
|
||||
"cgm": "application/x-cgm",
|
||||
"cit": "application/x-cit",
|
||||
"class": "java/*",
|
||||
"cml": "text/xml",
|
||||
"cmp": "application/x-cmp",
|
||||
"cmx": "application/x-cmx",
|
||||
"cot": "application/x-cot",
|
||||
"crl": "application/pkix-crl",
|
||||
"crt": "application/x-x509-ca-cert",
|
||||
"csi": "application/x-csi",
|
||||
"css": "text/css",
|
||||
"csv": "text/csv",
|
||||
"cu": "application/cu-seeme",
|
||||
"cut": "application/x-cut",
|
||||
"dbf": "application/x-dbf",
|
||||
"dbm": "application/x-dbm",
|
||||
"dbx": "application/x-dbx",
|
||||
"dcd": "text/xml",
|
||||
"dcx": "application/x-dcx",
|
||||
"deb": "application/x-debian-package",
|
||||
"der": "application/x-x509-ca-cert",
|
||||
"dgn": "application/x-dgn",
|
||||
"dib": "application/x-dib",
|
||||
"dll": "application/x-msdownload",
|
||||
"doc": "application/msword",
|
||||
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"dot": "application/msword",
|
||||
"drw": "application/x-drw",
|
||||
"dtd": "text/xml",
|
||||
"dvi": "application/x-dvi",
|
||||
"dwf": "application/x-dwf",
|
||||
"dwg": "application/x-dwg",
|
||||
"dxb": "application/x-dxb",
|
||||
"dxf": "application/x-dxf",
|
||||
"edn": "application/vnd.adobe.edn",
|
||||
"emf": "application/x-emf",
|
||||
"eml": "message/rfc822",
|
||||
"ent": "text/xml",
|
||||
"eot": "application/vnd.ms-fontobject",
|
||||
"epi": "application/x-epi",
|
||||
"eps": "application/postscript",
|
||||
"epub": "application/epub+zip",
|
||||
"etd": "application/x-ebx",
|
||||
"etx": "text/x-setext",
|
||||
"exe": "application/x-msdownload",
|
||||
"fax": "image/fax",
|
||||
"fdf": "application/vnd.fdf",
|
||||
"fif": "application/fractals",
|
||||
"flac": "audio/flac",
|
||||
"flv": "video/x-flv",
|
||||
"fo": "text/xml",
|
||||
"frm": "application/x-frm",
|
||||
"g4": "application/x-g4",
|
||||
"gbr": "application/x-gbr",
|
||||
"gif": "image/gif",
|
||||
"gl2": "application/x-gl2",
|
||||
"gp4": "application/x-gp4",
|
||||
"gz": "application/gzip",
|
||||
"hgl": "application/x-hgl",
|
||||
"hmr": "application/x-hmr",
|
||||
"hpg": "application/x-hpgl",
|
||||
"hpl": "application/x-hpl",
|
||||
"hqx": "application/mac-binhex40",
|
||||
"hrf": "application/x-hrf",
|
||||
"hta": "application/hta",
|
||||
"htc": "text/x-component",
|
||||
"htm": "text/html",
|
||||
"html": "text/html",
|
||||
"htt": "text/webviewhtml",
|
||||
"htx": "text/html",
|
||||
"icb": "application/x-icb",
|
||||
"ico": "application/x-ico",
|
||||
"ics": "text/calendar",
|
||||
"iff": "application/x-iff",
|
||||
"ig4": "application/x-g4",
|
||||
"igs": "application/x-igs",
|
||||
"iii": "application/x-iphone",
|
||||
"img": "application/x-img",
|
||||
"ini": "text/plain",
|
||||
"ins": "application/x-internet-signup",
|
||||
"ipa": "application/vnd.iphone",
|
||||
"iso": "application/x-iso9660-image",
|
||||
"isp": "application/x-internet-signup",
|
||||
"jar": "application/java-archive",
|
||||
"java": "java/*",
|
||||
"jfif": "image/jpeg",
|
||||
"jpe": "image/jpeg",
|
||||
"jpeg": "image/jpeg",
|
||||
"jpg": "image/jpeg",
|
||||
"js": "application/x-javascript",
|
||||
"json": "application/json",
|
||||
"jsp": "text/html",
|
||||
"la1": "audio/x-liquid-file",
|
||||
"lar": "application/x-laplayer-reg",
|
||||
"latex": "application/x-latex",
|
||||
"lavs": "audio/x-liquid-secure",
|
||||
"lbm": "application/x-lbm",
|
||||
"lmsff": "audio/x-la-lms",
|
||||
"log": "text/plain",
|
||||
"ls": "application/x-javascript",
|
||||
"ltr": "application/x-ltr",
|
||||
"m1v": "video/x-mpeg",
|
||||
"m2v": "video/x-mpeg",
|
||||
"m3u": "audio/mpegurl",
|
||||
"m4a": "audio/mp4",
|
||||
"m4e": "video/mpeg4",
|
||||
"m4v": "video/mp4",
|
||||
"mac": "application/x-mac",
|
||||
"man": "application/x-troff-man",
|
||||
"math": "text/xml",
|
||||
"mdb": "application/msaccess",
|
||||
"mfp": "application/x-shockwave-flash",
|
||||
"mht": "message/rfc822",
|
||||
"mhtml": "message/rfc822",
|
||||
"mi": "application/x-mi",
|
||||
"mid": "audio/mid",
|
||||
"midi": "audio/mid",
|
||||
"mil": "application/x-mil",
|
||||
"mml": "text/xml",
|
||||
"mnd": "audio/x-musicnet-download",
|
||||
"mns": "audio/x-musicnet-stream",
|
||||
"mocha": "application/x-javascript",
|
||||
"mov": "video/quicktime",
|
||||
"movie": "video/x-sgi-movie",
|
||||
"mp1": "audio/mp1",
|
||||
"mp2": "audio/mp2",
|
||||
"mp2v": "video/mpeg",
|
||||
"mp3": "audio/mp3",
|
||||
"mp4": "video/mp4",
|
||||
"mp4a": "audio/mp4",
|
||||
"mp4v": "video/mp4",
|
||||
"mpa": "video/x-mpg",
|
||||
"mpd": "application/vnd.ms-project",
|
||||
"mpe": "video/mpeg",
|
||||
"mpeg": "video/mpeg",
|
||||
"mpg": "video/mpeg",
|
||||
"mpg4": "video/mp4",
|
||||
"mpga": "audio/rn-mpeg",
|
||||
"mpp": "application/vnd.ms-project",
|
||||
"mps": "video/x-mpeg",
|
||||
"mpt": "application/vnd.ms-project",
|
||||
"mpv": "video/mpg",
|
||||
"mpv2": "video/mpeg",
|
||||
"mpw": "application/vnd.ms-project",
|
||||
"mpx": "application/vnd.ms-project",
|
||||
"mtx": "text/xml",
|
||||
"mxp": "application/x-mmxp",
|
||||
"net": "image/pnetvue",
|
||||
"nrf": "application/x-nrf",
|
||||
"nws": "message/rfc822",
|
||||
"odc": "text/x-ms-odc",
|
||||
"oga": "audio/ogg",
|
||||
"ogg": "audio/ogg",
|
||||
"ogv": "video/ogg",
|
||||
"ogx": "application/ogg",
|
||||
"out": "application/x-out",
|
||||
"p10": "application/pkcs10",
|
||||
"p12": "application/x-pkcs12",
|
||||
"p7b": "application/x-pkcs7-certificates",
|
||||
"p7c": "application/pkcs7-mime",
|
||||
"p7m": "application/pkcs7-mime",
|
||||
"p7r": "application/x-pkcs7-certreqresp",
|
||||
"p7s": "application/pkcs7-signature",
|
||||
"pbm": "image/x-portable-bitmap",
|
||||
"pc5": "application/x-pc5",
|
||||
"pci": "application/x-pci",
|
||||
"pcl": "application/x-pcl",
|
||||
"pcx": "application/x-pcx",
|
||||
"pdf": "application/pdf",
|
||||
"pdx": "application/vnd.adobe.pdx",
|
||||
"pfx": "application/x-pkcs12",
|
||||
"pgl": "application/x-pgl",
|
||||
"pgm": "image/x-portable-graymap",
|
||||
"pic": "application/x-pic",
|
||||
"pko": "application/vnd.ms-pki.pko",
|
||||
"pl": "application/x-perl",
|
||||
"plg": "text/html",
|
||||
"pls": "audio/scpls",
|
||||
"plt": "application/x-plt",
|
||||
"png": "image/png",
|
||||
"pnm": "image/x-portable-anymap",
|
||||
"pot": "application/vnd.ms-powerpoint",
|
||||
"ppa": "application/vnd.ms-powerpoint",
|
||||
"ppm": "application/x-ppm",
|
||||
"pps": "application/vnd.ms-powerpoint",
|
||||
"ppt": "application/vnd.ms-powerpoint",
|
||||
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
"pr": "application/x-pr",
|
||||
"prf": "application/pics-rules",
|
||||
"prn": "application/x-prn",
|
||||
"prt": "application/x-prt",
|
||||
"ps": "application/postscript",
|
||||
"ptn": "application/x-ptn",
|
||||
"pwz": "application/vnd.ms-powerpoint",
|
||||
"qt": "video/quicktime",
|
||||
"r3t": "text/vnd.rn-realtext3d",
|
||||
"ra": "audio/vnd.rn-realaudio",
|
||||
"ram": "audio/x-pn-realaudio",
|
||||
"rar": "application/x-rar-compressed",
|
||||
"ras": "application/x-ras",
|
||||
"rat": "application/rat-file",
|
||||
"rdf": "text/xml",
|
||||
"rec": "application/vnd.rn-recording",
|
||||
"red": "application/x-red",
|
||||
"rgb": "application/x-rgb",
|
||||
"rjs": "application/vnd.rn-realsystem-rjs",
|
||||
"rjt": "application/vnd.rn-realsystem-rjt",
|
||||
"rlc": "application/x-rlc",
|
||||
"rle": "application/x-rle",
|
||||
"rm": "application/vnd.rn-realmedia",
|
||||
"rmf": "application/vnd.adobe.rmf",
|
||||
"rmi": "audio/mid",
|
||||
"rmj": "application/vnd.rn-realsystem-rmj",
|
||||
"rmm": "audio/x-pn-realaudio",
|
||||
"rmp": "application/vnd.rn-rn_music_package",
|
||||
"rms": "application/vnd.rn-realmedia-secure",
|
||||
"rmvb": "application/vnd.rn-realmedia-vbr",
|
||||
"rmx": "application/vnd.rn-realsystem-rmx",
|
||||
"rnx": "application/vnd.rn-realplayer",
|
||||
"rp": "image/vnd.rn-realpix",
|
||||
"rpm": "audio/x-pn-realaudio-plugin",
|
||||
"rsml": "application/vnd.rn-rsml",
|
||||
"rss": "application/rss+xml",
|
||||
"rt": "text/vnd.rn-realtext",
|
||||
"rtf": "application/x-rtf",
|
||||
"rv": "video/vnd.rn-realvideo",
|
||||
"sam": "application/x-sam",
|
||||
"sat": "application/x-sat",
|
||||
"sdp": "application/sdp",
|
||||
"sdw": "application/x-sdw",
|
||||
"sgm": "text/sgml",
|
||||
"sgml": "text/sgml",
|
||||
"sis": "application/vnd.symbian.install",
|
||||
"sisx": "application/vnd.symbian.install",
|
||||
"sit": "application/x-stuffit",
|
||||
"slb": "application/x-slb",
|
||||
"sld": "application/x-sld",
|
||||
"slk": "drawing/x-slk",
|
||||
"smi": "application/smil",
|
||||
"smil": "application/smil",
|
||||
"smk": "application/x-smk",
|
||||
"snd": "audio/basic",
|
||||
"sol": "text/plain",
|
||||
"sor": "text/plain",
|
||||
"spc": "application/x-pkcs7-certificates",
|
||||
"spl": "application/futuresplash",
|
||||
"spp": "text/xml",
|
||||
"ssm": "application/streamingmedia",
|
||||
"sst": "application/vnd.ms-pki.certstore",
|
||||
"stl": "application/vnd.ms-pki.stl",
|
||||
"stm": "text/html",
|
||||
"sty": "application/x-sty",
|
||||
"svg": "image/svg+xml",
|
||||
"swf": "application/x-shockwave-flash",
|
||||
"tar": "application/x-tar",
|
||||
"tdf": "application/x-tdf",
|
||||
"tg4": "application/x-tg4",
|
||||
"tga": "application/x-tga",
|
||||
"tif": "image/tiff",
|
||||
"tiff": "image/tiff",
|
||||
"tld": "text/xml",
|
||||
"top": "drawing/x-top",
|
||||
"torrent": "application/x-bittorrent",
|
||||
"tsd": "text/xml",
|
||||
"ttf": "application/x-font-ttf",
|
||||
"txt": "text/plain",
|
||||
"uin": "application/x-icq",
|
||||
"uls": "text/iuls",
|
||||
"vcf": "text/x-vcard",
|
||||
"vda": "application/x-vda",
|
||||
"vdx": "application/vnd.visio",
|
||||
"vml": "text/xml",
|
||||
"vpg": "application/x-vpeg005",
|
||||
"vsd": "application/vnd.visio",
|
||||
"vss": "application/vnd.visio",
|
||||
"vst": "application/x-vst",
|
||||
"vsw": "application/vnd.visio",
|
||||
"vsx": "application/vnd.visio",
|
||||
"vtx": "application/vnd.visio",
|
||||
"vxml": "text/xml",
|
||||
"wav": "audio/wav",
|
||||
"wax": "audio/x-ms-wax",
|
||||
"wb1": "application/x-wb1",
|
||||
"wb2": "application/x-wb2",
|
||||
"wb3": "application/x-wb3",
|
||||
"wbmp": "image/vnd.wap.wbmp",
|
||||
"webm": "video/webm",
|
||||
"wiz": "application/msword",
|
||||
"wk3": "application/x-wk3",
|
||||
"wk4": "application/x-wk4",
|
||||
"wkq": "application/x-wkq",
|
||||
"wks": "application/x-wks",
|
||||
"wm": "video/x-ms-wm",
|
||||
"wma": "audio/x-ms-wma",
|
||||
"wmd": "application/x-ms-wmd",
|
||||
"wmf": "application/x-wmf",
|
||||
"wml": "text/vnd.wap.wml",
|
||||
"wmv": "video/x-ms-wmv",
|
||||
"wmx": "video/x-ms-wmx",
|
||||
"wmz": "application/x-ms-wmz",
|
||||
"woff": "application/x-font-woff",
|
||||
"wp6": "application/x-wp6",
|
||||
"wpd": "application/x-wpd",
|
||||
"wpg": "application/x-wpg",
|
||||
"wpl": "application/vnd.ms-wpl",
|
||||
"wq1": "application/x-wq1",
|
||||
"wr1": "application/x-wr1",
|
||||
"wri": "application/x-wri",
|
||||
"wrk": "application/x-wrk",
|
||||
"ws": "application/x-ws",
|
||||
"ws2": "application/x-ws",
|
||||
"wsc": "text/scriptlet",
|
||||
"wsdl": "text/xml",
|
||||
"wvx": "video/x-ms-wvx",
|
||||
"x_b": "application/x-x_b",
|
||||
"x_t": "application/x-x_t",
|
||||
"xap": "application/x-silverlight-app",
|
||||
"xbm": "image/x-xbitmap",
|
||||
"xdp": "application/vnd.adobe.xdp",
|
||||
"xdr": "text/xml",
|
||||
"xfd": "application/vnd.adobe.xfd",
|
||||
"xfdf": "application/vnd.adobe.xfdf",
|
||||
"xhtml": "text/html",
|
||||
"xls": "application/vnd.ms-excel",
|
||||
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"xlw": "application/x-xlw",
|
||||
"xml": "text/xml",
|
||||
"xpl": "audio/scpls",
|
||||
"xpm": "image/x-xpixmap",
|
||||
"xq": "text/xml",
|
||||
"xql": "text/xml",
|
||||
"xquery": "text/xml",
|
||||
"xsd": "text/xml",
|
||||
"xsl": "text/xml",
|
||||
"xslt": "text/xml",
|
||||
"xwd": "application/x-xwd",
|
||||
"yaml": "text/yaml",
|
||||
"yml": "text/yaml",
|
||||
"zip": "application/zip",
|
||||
"dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
|
||||
"wps": "application/vnd.ms-works",
|
||||
"pptm": "application/vnd.ms-powerpoint.presentation.macroenabled.12",
|
||||
"heic": "image/heic",
|
||||
"mkv": "video/x-matroska",
|
||||
"raw": "image/x-panasonic-raw",
|
||||
"webp": "image/webp",
|
||||
"3gp": "audio/3gpp",
|
||||
"3g2": "audio/3gpp2",
|
||||
"weba": "audio/webm",
|
||||
"woff2": "font/woff2",
|
||||
}
|
=== new file: myhwoss/obs/model_base.go (389 lines) @@ -0,0 +1,389 @@ ===
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Bucket defines bucket properties
|
||||
type Bucket struct {
|
||||
XMLName xml.Name `xml:"Bucket"`
|
||||
Name string `xml:"Name"`
|
||||
CreationDate time.Time `xml:"CreationDate"`
|
||||
Location string `xml:"Location"`
|
||||
BucketType string `xml:"BucketType,omitempty"`
|
||||
}
|
||||
|
||||
// Owner defines owner properties
|
||||
type Owner struct {
|
||||
XMLName xml.Name `xml:"Owner"`
|
||||
ID string `xml:"ID"`
|
||||
DisplayName string `xml:"DisplayName,omitempty"`
|
||||
}
|
||||
|
||||
// Initiator defines initiator properties
|
||||
type Initiator struct {
|
||||
XMLName xml.Name `xml:"Initiator"`
|
||||
ID string `xml:"ID"`
|
||||
DisplayName string `xml:"DisplayName,omitempty"`
|
||||
}
|
||||
|
||||
type bucketLocationObs struct {
|
||||
XMLName xml.Name `xml:"Location"`
|
||||
Location string `xml:",chardata"`
|
||||
}
|
||||
|
||||
// BucketLocation defines bucket location configuration
|
||||
type BucketLocation struct {
|
||||
XMLName xml.Name `xml:"CreateBucketConfiguration"`
|
||||
Location string `xml:"LocationConstraint,omitempty"`
|
||||
}
|
||||
|
||||
// BucketStoragePolicy defines the bucket storage class
|
||||
type BucketStoragePolicy struct {
|
||||
XMLName xml.Name `xml:"StoragePolicy"`
|
||||
StorageClass StorageClassType `xml:"DefaultStorageClass"`
|
||||
}
|
||||
|
||||
type bucketStoragePolicyObs struct {
|
||||
XMLName xml.Name `xml:"StorageClass"`
|
||||
StorageClass string `xml:",chardata"`
|
||||
}
|
||||
|
||||
// Content defines the object content properties
|
||||
type Content struct {
|
||||
XMLName xml.Name `xml:"Contents"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
ETag string `xml:"ETag"`
|
||||
Key string `xml:"Key"`
|
||||
LastModified time.Time `xml:"LastModified"`
|
||||
Size int64 `xml:"Size"`
|
||||
StorageClass StorageClassType `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
// Version defines the properties of versioning objects
|
||||
type Version struct {
|
||||
DeleteMarker
|
||||
XMLName xml.Name `xml:"Version"`
|
||||
ETag string `xml:"ETag"`
|
||||
Size int64 `xml:"Size"`
|
||||
}
|
||||
|
||||
// DeleteMarker defines the properties of versioning delete markers
|
||||
type DeleteMarker struct {
|
||||
XMLName xml.Name `xml:"DeleteMarker"`
|
||||
Key string `xml:"Key"`
|
||||
VersionId string `xml:"VersionId"`
|
||||
IsLatest bool `xml:"IsLatest"`
|
||||
LastModified time.Time `xml:"LastModified"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
StorageClass StorageClassType `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
// Upload defines multipart upload properties
|
||||
type Upload struct {
|
||||
XMLName xml.Name `xml:"Upload"`
|
||||
Key string `xml:"Key"`
|
||||
UploadId string `xml:"UploadId"`
|
||||
Initiated time.Time `xml:"Initiated"`
|
||||
StorageClass StorageClassType `xml:"StorageClass"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Initiator Initiator `xml:"Initiator"`
|
||||
}
|
||||
|
||||
// BucketQuota defines bucket quota configuration
|
||||
type BucketQuota struct {
|
||||
XMLName xml.Name `xml:"Quota"`
|
||||
Quota int64 `xml:"StorageQuota"`
|
||||
}
|
||||
|
||||
// Grantee defines grantee properties
|
||||
type Grantee struct {
|
||||
XMLName xml.Name `xml:"Grantee"`
|
||||
Type GranteeType `xml:"type,attr"`
|
||||
ID string `xml:"ID,omitempty"`
|
||||
DisplayName string `xml:"DisplayName,omitempty"`
|
||||
URI GroupUriType `xml:"URI,omitempty"`
|
||||
}
|
||||
|
||||
type granteeObs struct {
|
||||
XMLName xml.Name `xml:"Grantee"`
|
||||
Type GranteeType `xml:"type,attr"`
|
||||
ID string `xml:"ID,omitempty"`
|
||||
DisplayName string `xml:"DisplayName,omitempty"`
|
||||
Canned string `xml:"Canned,omitempty"`
|
||||
}
|
||||
|
||||
// Grant defines grant properties
|
||||
type Grant struct {
|
||||
XMLName xml.Name `xml:"Grant"`
|
||||
Grantee Grantee `xml:"Grantee"`
|
||||
Permission PermissionType `xml:"Permission"`
|
||||
Delivered bool `xml:"Delivered"`
|
||||
}
|
||||
|
||||
type grantObs struct {
|
||||
XMLName xml.Name `xml:"Grant"`
|
||||
Grantee granteeObs `xml:"Grantee"`
|
||||
Permission PermissionType `xml:"Permission"`
|
||||
Delivered bool `xml:"Delivered"`
|
||||
}
|
||||
|
||||
// AccessControlPolicy defines access control policy properties
|
||||
type AccessControlPolicy struct {
|
||||
XMLName xml.Name `xml:"AccessControlPolicy"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Grants []Grant `xml:"AccessControlList>Grant"`
|
||||
Delivered string `xml:"Delivered,omitempty"`
|
||||
}
|
||||
|
||||
type accessControlPolicyObs struct {
|
||||
XMLName xml.Name `xml:"AccessControlPolicy"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Grants []grantObs `xml:"AccessControlList>Grant"`
|
||||
}
|
||||
|
||||
// CorsRule defines the CORS rules
|
||||
type CorsRule struct {
|
||||
XMLName xml.Name `xml:"CORSRule"`
|
||||
ID string `xml:"ID,omitempty"`
|
||||
AllowedOrigin []string `xml:"AllowedOrigin"`
|
||||
AllowedMethod []string `xml:"AllowedMethod"`
|
||||
AllowedHeader []string `xml:"AllowedHeader,omitempty"`
|
||||
MaxAgeSeconds int `xml:"MaxAgeSeconds"`
|
||||
ExposeHeader []string `xml:"ExposeHeader,omitempty"`
|
||||
}
|
||||
|
||||
// BucketCors defines the bucket CORS configuration
|
||||
type BucketCors struct {
|
||||
XMLName xml.Name `xml:"CORSConfiguration"`
|
||||
CorsRules []CorsRule `xml:"CORSRule"`
|
||||
}
|
||||
|
||||
// BucketVersioningConfiguration defines the versioning configuration
|
||||
type BucketVersioningConfiguration struct {
|
||||
XMLName xml.Name `xml:"VersioningConfiguration"`
|
||||
Status VersioningStatusType `xml:"Status"`
|
||||
}
|
||||
|
||||
// IndexDocument defines the default page configuration
|
||||
type IndexDocument struct {
|
||||
Suffix string `xml:"Suffix"`
|
||||
}
|
||||
|
||||
// ErrorDocument defines the error page configuration
|
||||
type ErrorDocument struct {
|
||||
Key string `xml:"Key,omitempty"`
|
||||
}
|
||||
|
||||
// Condition defines condition in RoutingRule
|
||||
type Condition struct {
|
||||
XMLName xml.Name `xml:"Condition"`
|
||||
KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"`
|
||||
HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"`
|
||||
}
|
||||
|
||||
// Redirect defines redirect in RoutingRule
|
||||
type Redirect struct {
|
||||
XMLName xml.Name `xml:"Redirect"`
|
||||
Protocol ProtocolType `xml:"Protocol,omitempty"`
|
||||
HostName string `xml:"HostName,omitempty"`
|
||||
ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"`
|
||||
ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"`
|
||||
HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"`
|
||||
}
|
||||
|
||||
// RoutingRule defines routing rules
|
||||
type RoutingRule struct {
|
||||
XMLName xml.Name `xml:"RoutingRule"`
|
||||
Condition Condition `xml:"Condition,omitempty"`
|
||||
Redirect Redirect `xml:"Redirect"`
|
||||
}
|
||||
|
||||
// RedirectAllRequestsTo defines redirect in BucketWebsiteConfiguration
|
||||
type RedirectAllRequestsTo struct {
|
||||
XMLName xml.Name `xml:"RedirectAllRequestsTo"`
|
||||
Protocol ProtocolType `xml:"Protocol,omitempty"`
|
||||
HostName string `xml:"HostName"`
|
||||
}
|
||||
|
||||
// BucketWebsiteConfiguration defines the bucket website configuration
|
||||
type BucketWebsiteConfiguration struct {
|
||||
XMLName xml.Name `xml:"WebsiteConfiguration"`
|
||||
RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
|
||||
IndexDocument IndexDocument `xml:"IndexDocument,omitempty"`
|
||||
ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"`
|
||||
RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
|
||||
}
|
||||
|
||||
// BucketLoggingStatus defines the bucket logging configuration
|
||||
type BucketLoggingStatus struct {
|
||||
XMLName xml.Name `xml:"BucketLoggingStatus"`
|
||||
Agency string `xml:"Agency,omitempty"`
|
||||
TargetBucket string `xml:"LoggingEnabled>TargetBucket,omitempty"`
|
||||
TargetPrefix string `xml:"LoggingEnabled>TargetPrefix,omitempty"`
|
||||
TargetGrants []Grant `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"`
|
||||
}
|
||||
|
||||
// Transition defines transition property in LifecycleRule
|
||||
type Transition struct {
|
||||
XMLName xml.Name `xml:"Transition"`
|
||||
Date time.Time `xml:"Date,omitempty"`
|
||||
Days int `xml:"Days,omitempty"`
|
||||
StorageClass StorageClassType `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
// Expiration defines expiration property in LifecycleRule
|
||||
type Expiration struct {
|
||||
XMLName xml.Name `xml:"Expiration"`
|
||||
Date time.Time `xml:"Date,omitempty"`
|
||||
Days int `xml:"Days,omitempty"`
|
||||
ExpiredObjectDeleteMarker string `xml:"ExpiredObjectDeleteMarker,omitempty"`
|
||||
}
|
||||
|
||||
// NoncurrentVersionTransition defines noncurrentVersion transition property in LifecycleRule
|
||||
type NoncurrentVersionTransition struct {
|
||||
XMLName xml.Name `xml:"NoncurrentVersionTransition"`
|
||||
NoncurrentDays int `xml:"NoncurrentDays"`
|
||||
StorageClass StorageClassType `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
// NoncurrentVersionExpiration defines noncurrentVersion expiration property in LifecycleRule
|
||||
type NoncurrentVersionExpiration struct {
|
||||
XMLName xml.Name `xml:"NoncurrentVersionExpiration"`
|
||||
NoncurrentDays int `xml:"NoncurrentDays"`
|
||||
}
|
||||
|
||||
// AbortIncompleteMultipartUpload defines abortIncomplete expiration property in LifecycleRule
|
||||
type AbortIncompleteMultipartUpload struct {
|
||||
XMLName xml.Name `xml:"AbortIncompleteMultipartUpload"`
|
||||
DaysAfterInitiation int `xml:"DaysAfterInitiation"`
|
||||
}
|
||||
|
||||
// LifecycleRule defines lifecycle rule
|
||||
type LifecycleRule struct {
|
||||
ID string `xml:"ID,omitempty"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Status RuleStatusType `xml:"Status"`
|
||||
Transitions []Transition `xml:"Transition,omitempty"`
|
||||
Expiration Expiration `xml:"Expiration,omitempty"`
|
||||
NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
|
||||
NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
|
||||
AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
|
||||
Filter LifecycleFilter `xml:"Filter,omitempty"`
|
||||
}
|
||||
|
||||
type LifecycleFilter struct {
|
||||
XMLName xml.Name `xml:"Filter"`
|
||||
Prefix string `xml:"And>Prefix,omitempty"`
|
||||
Tags []Tag `xml:"And>Tag,omitempty"`
|
||||
}
|
||||
|
||||
// BucketEncryptionConfiguration defines the bucket encryption configuration
|
||||
type BucketEncryptionConfiguration struct {
|
||||
XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
|
||||
SSEAlgorithm string `xml:"Rule>ApplyServerSideEncryptionByDefault>SSEAlgorithm"`
|
||||
KMSMasterKeyID string `xml:"Rule>ApplyServerSideEncryptionByDefault>KMSMasterKeyID,omitempty"`
|
||||
ProjectID string `xml:"Rule>ApplyServerSideEncryptionByDefault>ProjectID,omitempty"`
|
||||
}
|
||||
|
||||
// Tag defines tag property in BucketTagging
|
||||
type Tag struct {
|
||||
XMLName xml.Name `xml:"Tag"`
|
||||
Key string `xml:"Key"`
|
||||
Value string `xml:"Value"`
|
||||
}
|
||||
|
||||
// BucketTagging defines the bucket tag configuration
|
||||
type BucketTagging struct {
|
||||
XMLName xml.Name `xml:"Tagging"`
|
||||
Tags []Tag `xml:"TagSet>Tag"`
|
||||
}
|
||||
|
||||
// FilterRule defines filter rule in TopicConfiguration
|
||||
type FilterRule struct {
|
||||
XMLName xml.Name `xml:"FilterRule"`
|
||||
Name string `xml:"Name,omitempty"`
|
||||
Value string `xml:"Value,omitempty"`
|
||||
}
|
||||
|
||||
// TopicConfiguration defines the topic configuration
|
||||
type TopicConfiguration struct {
|
||||
XMLName xml.Name `xml:"TopicConfiguration"`
|
||||
ID string `xml:"Id,omitempty"`
|
||||
Topic string `xml:"Topic"`
|
||||
Events []EventType `xml:"Event"`
|
||||
FilterRules []FilterRule `xml:"Filter>Object>FilterRule"`
|
||||
}
|
||||
|
||||
// BucketNotification defines the bucket notification configuration
|
||||
type BucketNotification struct {
|
||||
XMLName xml.Name `xml:"NotificationConfiguration"`
|
||||
TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"`
|
||||
}
|
||||
|
||||
type topicConfigurationS3 struct {
|
||||
XMLName xml.Name `xml:"TopicConfiguration"`
|
||||
ID string `xml:"Id,omitempty"`
|
||||
Topic string `xml:"Topic"`
|
||||
Events []string `xml:"Event"`
|
||||
FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"`
|
||||
}
|
||||
|
||||
type bucketNotificationS3 struct {
|
||||
XMLName xml.Name `xml:"NotificationConfiguration"`
|
||||
TopicConfigurations []topicConfigurationS3 `xml:"TopicConfiguration"`
|
||||
}
|
||||
|
||||
// ObjectToDelete defines the object property in DeleteObjectsInput
|
||||
type ObjectToDelete struct {
|
||||
XMLName xml.Name `xml:"Object"`
|
||||
Key string `xml:"Key"`
|
||||
VersionId string `xml:"VersionId,omitempty"`
|
||||
}
|
||||
|
||||
// Deleted defines the deleted property in DeleteObjectsOutput
|
||||
type Deleted struct {
|
||||
XMLName xml.Name `xml:"Deleted"`
|
||||
Key string `xml:"Key"`
|
||||
VersionId string `xml:"VersionId"`
|
||||
DeleteMarker bool `xml:"DeleteMarker"`
|
||||
DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"`
|
||||
}
|
||||
|
||||
// Part defines the part properties
|
||||
type Part struct {
|
||||
XMLName xml.Name `xml:"Part"`
|
||||
PartNumber int `xml:"PartNumber"`
|
||||
ETag string `xml:"ETag"`
|
||||
LastModified time.Time `xml:"LastModified,omitempty"`
|
||||
Size int64 `xml:"Size,omitempty"`
|
||||
}
|
||||
|
||||
// BucketPayer defines the request payment configuration
|
||||
type BucketPayer struct {
|
||||
XMLName xml.Name `xml:"RequestPaymentConfiguration"`
|
||||
Payer PayerType `xml:"Payer"`
|
||||
}
|
||||
|
||||
// HttpHeader defines the standard metadata
|
||||
type HttpHeader struct {
|
||||
CacheControl string
|
||||
ContentDisposition string
|
||||
ContentEncoding string
|
||||
ContentLanguage string
|
||||
ContentType string
|
||||
HttpExpires string
|
||||
}
|
=== new file: myhwoss/obs/model_bucket.go (407 lines) @@ -0,0 +1,407 @@ ===
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// DeleteBucketCustomDomainInput is the input parameter of DeleteBucketCustomDomain function
|
||||
type DeleteBucketCustomDomainInput struct {
|
||||
Bucket string
|
||||
CustomDomain string
|
||||
}
|
||||
|
||||
// GetBucketCustomDomainOuput is the result of GetBucketCustomdomain function
|
||||
type GetBucketCustomDomainOuput struct {
|
||||
BaseModel
|
||||
Domains []Domain `xml:"Domains"`
|
||||
}
|
||||
|
||||
// SetBucketCustomDomainInput is the input parameter of SetBucketCustomDomain function
|
||||
type SetBucketCustomDomainInput struct {
|
||||
Bucket string
|
||||
CustomDomain string
|
||||
}
|
||||
|
||||
// GetBucketMirrorBackToSourceOuput is the result of GetBucketMirrorBackToSource function
|
||||
type GetBucketMirrorBackToSourceOuput struct {
|
||||
BaseModel
|
||||
Rules string `json:"body"`
|
||||
}
|
||||
|
||||
type SetBucketMirrorBackToSourceInput struct {
|
||||
Bucket string
|
||||
Rules string `json:"body"`
|
||||
}
|
||||
|
||||
// Content defines the object content properties
|
||||
type Domain struct {
|
||||
DomainName string `xml:"DomainName"`
|
||||
CreateTime string `xml:"CreateTime"`
|
||||
}
|
||||
|
||||
// ListBucketsInput is the input parameter of ListBuckets function
|
||||
type ListBucketsInput struct {
|
||||
QueryLocation bool
|
||||
BucketType BucketType
|
||||
MaxKeys int
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListBucketsOutput is the result of ListBuckets function
|
||||
type ListBucketsOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Buckets []Bucket `xml:"Buckets>Bucket"`
|
||||
IsTruncated bool `xml:"IsTruncated"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxKeys int `xml:"MaxKeys"`
|
||||
}
|
||||
|
||||
// CreateBucketInput is the input parameter of CreateBucket function
|
||||
type CreateBucketInput struct {
|
||||
BucketLocation
|
||||
Bucket string `xml:"-"`
|
||||
ACL AclType `xml:"-"`
|
||||
StorageClass StorageClassType `xml:"-"`
|
||||
GrantReadId string `xml:"-"`
|
||||
GrantWriteId string `xml:"-"`
|
||||
GrantReadAcpId string `xml:"-"`
|
||||
GrantWriteAcpId string `xml:"-"`
|
||||
GrantFullControlId string `xml:"-"`
|
||||
GrantReadDeliveredId string `xml:"-"`
|
||||
GrantFullControlDeliveredId string `xml:"-"`
|
||||
Epid string `xml:"-"`
|
||||
AvailableZone string `xml:"-"`
|
||||
IsFSFileInterface bool `xml:"-"`
|
||||
BucketRedundancy BucketRedundancyType `xml:"-"`
|
||||
IsFusionAllowUpgrade bool `xml:"-"`
|
||||
IsRedundancyAllowALT bool `xml:"-"`
|
||||
}
|
||||
|
||||
// SetBucketStoragePolicyInput is the input parameter of SetBucketStoragePolicy function
|
||||
type SetBucketStoragePolicyInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketStoragePolicy
|
||||
}
|
||||
|
||||
type getBucketStoragePolicyOutputS3 struct {
|
||||
BaseModel
|
||||
BucketStoragePolicy
|
||||
}
|
||||
|
||||
// GetBucketStoragePolicyOutput is the result of GetBucketStoragePolicy function
|
||||
type GetBucketStoragePolicyOutput struct {
|
||||
BaseModel
|
||||
StorageClass string
|
||||
}
|
||||
|
||||
type getBucketStoragePolicyOutputObs struct {
|
||||
BaseModel
|
||||
bucketStoragePolicyObs
|
||||
}
|
||||
|
||||
// SetBucketQuotaInput is the input parameter of SetBucketQuota function
|
||||
type SetBucketQuotaInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketQuota
|
||||
}
|
||||
|
||||
// GetBucketQuotaOutput is the result of GetBucketQuota function
|
||||
type GetBucketQuotaOutput struct {
|
||||
BaseModel
|
||||
BucketQuota
|
||||
}
|
||||
|
||||
// GetBucketStorageInfoOutput is the result of GetBucketStorageInfo function
|
||||
type GetBucketStorageInfoOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"GetBucketStorageInfoResult"`
|
||||
Size int64 `xml:"Size"`
|
||||
ObjectNumber int `xml:"ObjectNumber"`
|
||||
}
|
||||
|
||||
type getBucketLocationOutputS3 struct {
|
||||
BaseModel
|
||||
BucketLocation
|
||||
}
|
||||
type getBucketLocationOutputObs struct {
|
||||
BaseModel
|
||||
bucketLocationObs
|
||||
}
|
||||
|
||||
// GetBucketLocationOutput is the result of GetBucketLocation function
|
||||
type GetBucketLocationOutput struct {
|
||||
BaseModel
|
||||
Location string `xml:"-"`
|
||||
}
|
||||
|
||||
// GetBucketAclOutput is the result of GetBucketAcl function
|
||||
type GetBucketAclOutput struct {
|
||||
BaseModel
|
||||
AccessControlPolicy
|
||||
}
|
||||
|
||||
type getBucketACLOutputObs struct {
|
||||
BaseModel
|
||||
accessControlPolicyObs
|
||||
}
|
||||
|
||||
// SetBucketAclInput is the input parameter of SetBucketAcl function
|
||||
type SetBucketAclInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
ACL AclType `xml:"-"`
|
||||
AccessControlPolicy
|
||||
}
|
||||
|
||||
// SetBucketPolicyInput is the input parameter of SetBucketPolicy function
|
||||
type SetBucketPolicyInput struct {
|
||||
Bucket string
|
||||
Policy string
|
||||
}
|
||||
|
||||
// GetBucketPolicyOutput is the result of GetBucketPolicy function
|
||||
type GetBucketPolicyOutput struct {
|
||||
BaseModel
|
||||
Policy string `json:"body"`
|
||||
}
|
||||
|
||||
// SetBucketCorsInput is the input parameter of SetBucketCors function
|
||||
type SetBucketCorsInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketCors
|
||||
}
|
||||
|
||||
// GetBucketCorsOutput is the result of GetBucketCors function
|
||||
type GetBucketCorsOutput struct {
|
||||
BaseModel
|
||||
BucketCors
|
||||
}
|
||||
|
||||
// SetBucketVersioningInput is the input parameter of SetBucketVersioning function
|
||||
type SetBucketVersioningInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketVersioningConfiguration
|
||||
}
|
||||
|
||||
// GetBucketVersioningOutput is the result of GetBucketVersioning function
|
||||
type GetBucketVersioningOutput struct {
|
||||
BaseModel
|
||||
BucketVersioningConfiguration
|
||||
}
|
||||
|
||||
// SetBucketWebsiteConfigurationInput is the input parameter of SetBucketWebsiteConfiguration function
|
||||
type SetBucketWebsiteConfigurationInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketWebsiteConfiguration
|
||||
}
|
||||
|
||||
// GetBucketWebsiteConfigurationOutput is the result of GetBucketWebsiteConfiguration function
|
||||
type GetBucketWebsiteConfigurationOutput struct {
|
||||
BaseModel
|
||||
BucketWebsiteConfiguration
|
||||
}
|
||||
|
||||
// GetBucketMetadataInput is the input parameter of GetBucketMetadata function
|
||||
type GetBucketMetadataInput struct {
|
||||
Bucket string
|
||||
Origin string
|
||||
RequestHeader string
|
||||
}
|
||||
|
||||
// SetObjectMetadataInput is the input parameter of SetObjectMetadata function
|
||||
type SetObjectMetadataInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
VersionId string
|
||||
MetadataDirective MetadataDirectiveType
|
||||
Expires string
|
||||
WebsiteRedirectLocation string
|
||||
StorageClass StorageClassType
|
||||
Metadata map[string]string
|
||||
HttpHeader
|
||||
}
|
||||
|
||||
//SetObjectMetadataOutput is the result of SetObjectMetadata function
|
||||
type SetObjectMetadataOutput struct {
|
||||
BaseModel
|
||||
MetadataDirective MetadataDirectiveType
|
||||
CacheControl string
|
||||
ContentDisposition string
|
||||
ContentEncoding string
|
||||
ContentLanguage string
|
||||
ContentType string
|
||||
Expires string
|
||||
WebsiteRedirectLocation string
|
||||
StorageClass StorageClassType
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
// GetBucketMetadataOutput is the result of GetBucketMetadata function
|
||||
type GetBucketMetadataOutput struct {
|
||||
BaseModel
|
||||
StorageClass StorageClassType
|
||||
Location string
|
||||
Version string
|
||||
AllowOrigin string
|
||||
AllowMethod string
|
||||
AllowHeader string
|
||||
MaxAgeSeconds int
|
||||
ExposeHeader string
|
||||
Epid string
|
||||
AZRedundancy AvailableZoneType
|
||||
FSStatus FSStatusType
|
||||
BucketRedundancy BucketRedundancyType
|
||||
}
|
||||
|
||||
// SetBucketLoggingConfigurationInput is the input parameter of SetBucketLoggingConfiguration function
|
||||
type SetBucketLoggingConfigurationInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketLoggingStatus
|
||||
}
|
||||
|
||||
// GetBucketLoggingConfigurationOutput is the result of GetBucketLoggingConfiguration function
|
||||
type GetBucketLoggingConfigurationOutput struct {
|
||||
BaseModel
|
||||
BucketLoggingStatus
|
||||
}
|
||||
|
||||
// BucketLifecyleConfiguration defines the bucket lifecycle configuration
|
||||
type BucketLifecyleConfiguration struct {
|
||||
XMLName xml.Name `xml:"LifecycleConfiguration"`
|
||||
LifecycleRules []LifecycleRule `xml:"Rule"`
|
||||
}
|
||||
|
||||
// SetBucketLifecycleConfigurationInput is the input parameter of SetBucketLifecycleConfiguration function
|
||||
type SetBucketLifecycleConfigurationInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketLifecyleConfiguration
|
||||
}
|
||||
|
||||
// GetBucketLifecycleConfigurationOutput is the result of GetBucketLifecycleConfiguration function
|
||||
type GetBucketLifecycleConfigurationOutput struct {
|
||||
BaseModel
|
||||
BucketLifecyleConfiguration
|
||||
}
|
||||
|
||||
// SetBucketEncryptionInput is the input parameter of SetBucketEncryption function
|
||||
type SetBucketEncryptionInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketEncryptionConfiguration
|
||||
}
|
||||
|
||||
// GetBucketEncryptionOutput is the result of GetBucketEncryption function
|
||||
type GetBucketEncryptionOutput struct {
|
||||
BaseModel
|
||||
BucketEncryptionConfiguration
|
||||
}
|
||||
|
||||
// SetBucketTaggingInput is the input parameter of SetBucketTagging function
|
||||
type SetBucketTaggingInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketTagging
|
||||
}
|
||||
|
||||
// GetBucketTaggingOutput is the result of GetBucketTagging function
|
||||
type GetBucketTaggingOutput struct {
|
||||
BaseModel
|
||||
BucketTagging
|
||||
}
|
||||
|
||||
// SetBucketNotificationInput is the input parameter of SetBucketNotification function
|
||||
type SetBucketNotificationInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketNotification
|
||||
}
|
||||
|
||||
type getBucketNotificationOutputS3 struct {
|
||||
BaseModel
|
||||
bucketNotificationS3
|
||||
}
|
||||
|
||||
// GetBucketNotificationOutput is the result of GetBucketNotification function
|
||||
type GetBucketNotificationOutput struct {
|
||||
BaseModel
|
||||
BucketNotification
|
||||
}
|
||||
|
||||
// SetBucketFetchPolicyInput is the input parameter of SetBucketFetchPolicy function
|
||||
type SetBucketFetchPolicyInput struct {
|
||||
Bucket string
|
||||
Status FetchPolicyStatusType `json:"status"`
|
||||
Agency string `json:"agency"`
|
||||
}
|
||||
|
||||
// GetBucketFetchPolicyInput is the input parameter of GetBucketFetchPolicy function
|
||||
type GetBucketFetchPolicyInput struct {
|
||||
Bucket string
|
||||
}
|
||||
|
||||
// GetBucketFetchPolicyOutput is the result of GetBucketFetchPolicy function
|
||||
type GetBucketFetchPolicyOutput struct {
|
||||
BaseModel
|
||||
FetchResponse `json:"fetch"`
|
||||
}
|
||||
|
||||
// DeleteBucketFetchPolicyInput is the input parameter of DeleteBucketFetchPolicy function
|
||||
type DeleteBucketFetchPolicyInput struct {
|
||||
Bucket string
|
||||
}
|
||||
|
||||
// SetBucketFetchJobInput is the input parameter of SetBucketFetchJob function
|
||||
type SetBucketFetchJobInput struct {
|
||||
Bucket string `json:"bucket"`
|
||||
URL string `json:"url"`
|
||||
Host string `json:"host,omitempty"`
|
||||
Key string `json:"key,omitempty"`
|
||||
Md5 string `json:"md5,omitempty"`
|
||||
CallBackURL string `json:"callbackurl,omitempty"`
|
||||
CallBackBody string `json:"callbackbody,omitempty"`
|
||||
CallBackBodyType string `json:"callbackbodytype,omitempty"`
|
||||
CallBackHost string `json:"callbackhost,omitempty"`
|
||||
FileType string `json:"file_type,omitempty"`
|
||||
IgnoreSameKey bool `json:"ignore_same_key,omitempty"`
|
||||
ObjectHeaders map[string]string `json:"objectheaders,omitempty"`
|
||||
Etag string `json:"etag,omitempty"`
|
||||
TrustName string `json:"trustname,omitempty"`
|
||||
}
|
||||
|
||||
// SetBucketFetchJobOutput is the result of SetBucketFetchJob function
|
||||
type SetBucketFetchJobOutput struct {
|
||||
BaseModel
|
||||
SetBucketFetchJobResponse
|
||||
}
|
||||
|
||||
// GetBucketFetchJobInput is the input parameter of GetBucketFetchJob function
|
||||
type GetBucketFetchJobInput struct {
|
||||
Bucket string
|
||||
JobID string
|
||||
}
|
||||
|
||||
// GetBucketFetchJobOutput is the result of GetBucketFetchJob function
|
||||
type GetBucketFetchJobOutput struct {
|
||||
BaseModel
|
||||
GetBucketFetchJobResponse
|
||||
}
|
||||
|
||||
type GetBucketFSStatusInput struct {
|
||||
GetBucketMetadataInput
|
||||
}
|
||||
|
||||
type GetBucketFSStatusOutput struct {
|
||||
GetBucketMetadataOutput
|
||||
FSStatus FSStatusType
|
||||
}
|
=== new file: myhwoss/obs/model_header.go (33 lines) @@ -0,0 +1,33 @@ ===
// Copyright 2019 Huawei Technologies Co.,Ltd.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

package obs

// ISseHeader defines the sse encryption header
type ISseHeader interface {
	GetEncryption() string
	GetKey() string
}

// SseKmsHeader defines the SseKms header
type SseKmsHeader struct {
	Encryption string
	Key        string
	isObs      bool
}

// SseCHeader defines the SseC header
type SseCHeader struct {
	Encryption string
	Key        string
	KeyMD5     string
}
=== new file: myhwoss/obs/model_object.go (391 lines) @@ -0,0 +1,391 @@ ===
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ListObjsInput defines parameters for listing objects
|
||||
type ListObjsInput struct {
|
||||
Prefix string
|
||||
MaxKeys int
|
||||
Delimiter string
|
||||
Origin string
|
||||
RequestHeader string
|
||||
EncodingType string
|
||||
}
|
||||
|
||||
// ListObjectsInput is the input parameter of ListObjects function
|
||||
type ListObjectsInput struct {
|
||||
ListObjsInput
|
||||
Bucket string
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListObjectsOutput is the result of ListObjects function
|
||||
type ListObjectsOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"ListBucketResult"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
IsTruncated bool `xml:"IsTruncated"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxKeys int `xml:"MaxKeys"`
|
||||
Name string `xml:"Name"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Contents []Content `xml:"Contents"`
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
|
||||
Location string `xml:"-"`
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
}
|
||||
|
||||
// ListVersionsInput is the input parameter of ListVersions function
|
||||
type ListVersionsInput struct {
|
||||
ListObjsInput
|
||||
Bucket string
|
||||
KeyMarker string
|
||||
VersionIdMarker string
|
||||
}
|
||||
|
||||
// ListVersionsOutput is the result of ListVersions function
|
||||
type ListVersionsOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"ListVersionsResult"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
IsTruncated bool `xml:"IsTruncated"`
|
||||
KeyMarker string `xml:"KeyMarker"`
|
||||
NextKeyMarker string `xml:"NextKeyMarker"`
|
||||
VersionIdMarker string `xml:"VersionIdMarker"`
|
||||
NextVersionIdMarker string `xml:"NextVersionIdMarker"`
|
||||
MaxKeys int `xml:"MaxKeys"`
|
||||
Name string `xml:"Name"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Versions []Version `xml:"Version"`
|
||||
DeleteMarkers []DeleteMarker `xml:"DeleteMarker"`
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
|
||||
Location string `xml:"-"`
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
}
|
||||
|
||||
// DeleteObjectInput is the input parameter of DeleteObject function
|
||||
type DeleteObjectInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
VersionId string
|
||||
}
|
||||
|
||||
// DeleteObjectOutput is the result of DeleteObject function
|
||||
type DeleteObjectOutput struct {
|
||||
BaseModel
|
||||
VersionId string
|
||||
DeleteMarker bool
|
||||
}
|
||||
|
||||
// DeleteObjectsInput is the input parameter of DeleteObjects function
|
||||
type DeleteObjectsInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
XMLName xml.Name `xml:"Delete"`
|
||||
Quiet bool `xml:"Quiet,omitempty"`
|
||||
Objects []ObjectToDelete `xml:"Object"`
|
||||
EncodingType string `xml:"EncodingType"`
|
||||
}
|
||||
|
||||
// DeleteObjectsOutput is the result of DeleteObjects function
|
||||
type DeleteObjectsOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"DeleteResult"`
|
||||
Deleteds []Deleted `xml:"Deleted"`
|
||||
Errors []Error `xml:"Error"`
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
}
|
||||
|
||||
// SetObjectAclInput is the input parameter of SetObjectAcl function
|
||||
type SetObjectAclInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
Key string `xml:"-"`
|
||||
VersionId string `xml:"-"`
|
||||
ACL AclType `xml:"-"`
|
||||
AccessControlPolicy
|
||||
}
|
||||
|
||||
// GetObjectAclInput is the input parameter of GetObjectAcl function
|
||||
type GetObjectAclInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
VersionId string
|
||||
}
|
||||
|
||||
// GetObjectAclOutput is the result of GetObjectAcl function
|
||||
type GetObjectAclOutput struct {
|
||||
BaseModel
|
||||
VersionId string
|
||||
AccessControlPolicy
|
||||
}
|
||||
|
||||
// RestoreObjectInput is the input parameter of RestoreObject function
|
||||
type RestoreObjectInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
Key string `xml:"-"`
|
||||
VersionId string `xml:"-"`
|
||||
XMLName xml.Name `xml:"RestoreRequest"`
|
||||
Days int `xml:"Days"`
|
||||
Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"`
|
||||
}
|
||||
|
||||
// GetObjectMetadataInput is the input parameter of GetObjectMetadata function
|
||||
type GetObjectMetadataInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
VersionId string
|
||||
Origin string
|
||||
RequestHeader string
|
||||
SseHeader ISseHeader
|
||||
}
|
||||
|
||||
// GetObjectMetadataOutput is the result of GetObjectMetadata function
|
||||
type GetObjectMetadataOutput struct {
|
||||
BaseModel
|
||||
HttpHeader
|
||||
VersionId string
|
||||
WebsiteRedirectLocation string
|
||||
Expiration string
|
||||
Restore string
|
||||
ObjectType string
|
||||
NextAppendPosition string
|
||||
StorageClass StorageClassType
|
||||
ContentLength int64
|
||||
ContentType string
|
||||
ETag string
|
||||
AllowOrigin string
|
||||
AllowHeader string
|
||||
AllowMethod string
|
||||
ExposeHeader string
|
||||
MaxAgeSeconds int
|
||||
LastModified time.Time
|
||||
SseHeader ISseHeader
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
type GetAttributeInput struct {
|
||||
GetObjectMetadataInput
|
||||
RequestPayer string
|
||||
}
|
||||
|
||||
type GetAttributeOutput struct {
|
||||
GetObjectMetadataOutput
|
||||
Mode int
|
||||
}
|
||||
|
||||
// GetObjectInput is the input parameter of GetObject function
|
||||
type GetObjectInput struct {
|
||||
GetObjectMetadataInput
|
||||
IfMatch string
|
||||
IfNoneMatch string
|
||||
AcceptEncoding string
|
||||
IfUnmodifiedSince time.Time
|
||||
IfModifiedSince time.Time
|
||||
RangeStart int64
|
||||
RangeEnd int64
|
||||
ImageProcess string
|
||||
ResponseCacheControl string
|
||||
ResponseContentDisposition string
|
||||
ResponseContentEncoding string
|
||||
ResponseContentLanguage string
|
||||
ResponseContentType string
|
||||
ResponseExpires string
|
||||
}
|
||||
|
||||
// GetObjectOutput is the result of GetObject function
|
||||
type GetObjectOutput struct {
|
||||
GetObjectMetadataOutput
|
||||
DeleteMarker bool
|
||||
CacheControl string
|
||||
ContentDisposition string
|
||||
ContentEncoding string
|
||||
ContentLanguage string
|
||||
Expires string
|
||||
Body io.ReadCloser
|
||||
}
|
||||
|
||||
// ObjectOperationInput defines the object operation properties
|
||||
type ObjectOperationInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
ACL AclType
|
||||
GrantReadId string
|
||||
GrantReadAcpId string
|
||||
GrantWriteAcpId string
|
||||
GrantFullControlId string
|
||||
StorageClass StorageClassType
|
||||
WebsiteRedirectLocation string
|
||||
Expires int64
|
||||
SseHeader ISseHeader
|
||||
Metadata map[string]string
|
||||
HttpHeader
|
||||
}
|
||||
|
||||
// PutObjectBasicInput defines the basic object operation properties
|
||||
type PutObjectBasicInput struct {
|
||||
ObjectOperationInput
|
||||
ContentMD5 string
|
||||
ContentLength int64
|
||||
}
|
||||
|
||||
// PutObjectInput is the input parameter of PutObject function
|
||||
type PutObjectInput struct {
|
||||
PutObjectBasicInput
|
||||
Body io.Reader
|
||||
}
|
||||
|
||||
type NewFolderInput struct {
|
||||
ObjectOperationInput
|
||||
RequestPayer string
|
||||
}
|
||||
|
||||
type NewFolderOutput struct {
|
||||
PutObjectOutput
|
||||
}
|
||||
|
||||
// PutFileInput is the input parameter of PutFile function
|
||||
type PutFileInput struct {
|
||||
PutObjectBasicInput
|
||||
SourceFile string
|
||||
}
|
||||
|
||||
// PutObjectOutput is the result of PutObject function
|
||||
type PutObjectOutput struct {
|
||||
BaseModel
|
||||
VersionId string
|
||||
SseHeader ISseHeader
|
||||
StorageClass StorageClassType
|
||||
ETag string
|
||||
ObjectUrl string
|
||||
CallbackBody
|
||||
}
|
||||
|
||||
// CopyObjectInput is the input parameter of CopyObject function
|
||||
type CopyObjectInput struct {
|
||||
ObjectOperationInput
|
||||
CopySourceBucket string
|
||||
CopySourceKey string
|
||||
CopySourceVersionId string
|
||||
CopySourceIfMatch string
|
||||
CopySourceIfNoneMatch string
|
||||
CopySourceIfUnmodifiedSince time.Time
|
||||
CopySourceIfModifiedSince time.Time
|
||||
SourceSseHeader ISseHeader
|
||||
CacheControl string
|
||||
ContentDisposition string
|
||||
ContentEncoding string
|
||||
ContentLanguage string
|
||||
ContentType string
|
||||
Expires string
|
||||
MetadataDirective MetadataDirectiveType
|
||||
SuccessActionRedirect string
|
||||
}
|
||||
|
||||
// CopyObjectOutput is the result of CopyObject function
|
||||
type CopyObjectOutput struct {
|
||||
BaseModel
|
||||
CopySourceVersionId string `xml:"-"`
|
||||
VersionId string `xml:"-"`
|
||||
SseHeader ISseHeader `xml:"-"`
|
||||
XMLName xml.Name `xml:"CopyObjectResult"`
|
||||
LastModified time.Time `xml:"LastModified"`
|
||||
ETag string `xml:"ETag"`
|
||||
}
|
||||
|
||||
// UploadFileInput is the input parameter of UploadFile function
|
||||
type UploadFileInput struct {
|
||||
ObjectOperationInput
|
||||
ContentType string
|
||||
UploadFile string
|
||||
PartSize int64
|
||||
TaskNum int
|
||||
EnableCheckpoint bool
|
||||
CheckpointFile string
|
||||
EncodingType string
|
||||
}
|
||||
|
||||
// DownloadFileInput is the input parameter of DownloadFile function
|
||||
type DownloadFileInput struct {
|
||||
GetObjectMetadataInput
|
||||
IfMatch string
|
||||
IfNoneMatch string
|
||||
IfModifiedSince time.Time
|
||||
IfUnmodifiedSince time.Time
|
||||
DownloadFile string
|
||||
PartSize int64
|
||||
TaskNum int
|
||||
EnableCheckpoint bool
|
||||
CheckpointFile string
|
||||
}
|
||||
|
||||
type AppendObjectInput struct {
|
||||
PutObjectBasicInput
|
||||
Body io.Reader
|
||||
Position int64
|
||||
}
|
||||
|
||||
type AppendObjectOutput struct {
|
||||
BaseModel
|
||||
VersionId string
|
||||
SseHeader ISseHeader
|
||||
NextAppendPosition int64
|
||||
ETag string
|
||||
}
|
||||
|
||||
type ModifyObjectInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
Position int64
|
||||
Body io.Reader
|
||||
ContentLength int64
|
||||
}
|
||||
|
||||
type ModifyObjectOutput struct {
|
||||
BaseModel
|
||||
ETag string
|
||||
}
|
||||
|
||||
// HeadObjectInput is the input parameter of HeadObject function
|
||||
type HeadObjectInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
VersionId string
|
||||
}
|
||||
|
||||
type RenameFileInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
NewObjectKey string
|
||||
RequestPayer string
|
||||
}
|
||||
|
||||
type RenameFileOutput struct {
|
||||
BaseModel
|
||||
}
|
||||
|
||||
type RenameFolderInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
NewObjectKey string
|
||||
RequestPayer string
|
||||
}
|
||||
|
||||
type RenameFolderOutput struct {
|
||||
BaseModel
|
||||
}
|
65
myhwoss/obs/model_other.go
Normal file
65
myhwoss/obs/model_other.go
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// CreateSignedUrlInput is the input parameter of CreateSignedUrl function
|
||||
type CreateSignedUrlInput struct {
|
||||
Method HttpMethodType
|
||||
Bucket string
|
||||
Key string
|
||||
Policy string
|
||||
SubResource SubResourceType
|
||||
Expires int
|
||||
Headers map[string]string
|
||||
QueryParams map[string]string
|
||||
}
|
||||
|
||||
// CreateSignedUrlOutput is the result of CreateSignedUrl function
|
||||
type CreateSignedUrlOutput struct {
|
||||
SignedUrl string
|
||||
ActualSignedRequestHeaders http.Header
|
||||
}
|
||||
|
||||
// CreateBrowserBasedSignatureInput is the input parameter of CreateBrowserBasedSignature function.
|
||||
type CreateBrowserBasedSignatureInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
Expires int
|
||||
FormParams map[string]string
|
||||
}
|
||||
|
||||
// CreateBrowserBasedSignatureOutput is the result of CreateBrowserBasedSignature function.
|
||||
type CreateBrowserBasedSignatureOutput struct {
|
||||
OriginPolicy string
|
||||
Policy string
|
||||
Algorithm string
|
||||
Credential string
|
||||
Date string
|
||||
Signature string
|
||||
}
|
||||
|
||||
// SetBucketRequestPaymentInput is the input parameter of SetBucketRequestPayment function
|
||||
type SetBucketRequestPaymentInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
BucketPayer
|
||||
}
|
||||
|
||||
// GetBucketRequestPaymentOutput is the result of GetBucketRequestPayment function
|
||||
type GetBucketRequestPaymentOutput struct {
|
||||
BaseModel
|
||||
BucketPayer
|
||||
}
|
172
myhwoss/obs/model_part.go
Normal file
172
myhwoss/obs/model_part.go
Normal file
@@ -0,0 +1,172 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ListMultipartUploadsInput is the input parameter of ListMultipartUploads function
|
||||
type ListMultipartUploadsInput struct {
|
||||
Bucket string
|
||||
Prefix string
|
||||
MaxUploads int
|
||||
Delimiter string
|
||||
KeyMarker string
|
||||
UploadIdMarker string
|
||||
EncodingType string
|
||||
}
|
||||
|
||||
// ListMultipartUploadsOutput is the result of ListMultipartUploads function
|
||||
type ListMultipartUploadsOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"ListMultipartUploadsResult"`
|
||||
Bucket string `xml:"Bucket"`
|
||||
KeyMarker string `xml:"KeyMarker"`
|
||||
NextKeyMarker string `xml:"NextKeyMarker"`
|
||||
UploadIdMarker string `xml:"UploadIdMarker"`
|
||||
NextUploadIdMarker string `xml:"NextUploadIdMarker"`
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
IsTruncated bool `xml:"IsTruncated"`
|
||||
MaxUploads int `xml:"MaxUploads"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Uploads []Upload `xml:"Upload"`
|
||||
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
}
|
||||
|
||||
// AbortMultipartUploadInput is the input parameter of AbortMultipartUpload function
|
||||
type AbortMultipartUploadInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
UploadId string
|
||||
}
|
||||
|
||||
// InitiateMultipartUploadInput is the input parameter of InitiateMultipartUpload function
|
||||
type InitiateMultipartUploadInput struct {
|
||||
ObjectOperationInput
|
||||
ContentType string
|
||||
EncodingType string
|
||||
}
|
||||
|
||||
// InitiateMultipartUploadOutput is the result of InitiateMultipartUpload function
|
||||
type InitiateMultipartUploadOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
|
||||
Bucket string `xml:"Bucket"`
|
||||
Key string `xml:"Key"`
|
||||
UploadId string `xml:"UploadId"`
|
||||
SseHeader ISseHeader
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
}
|
||||
|
||||
// UploadPartInput is the input parameter of UploadPart function
|
||||
type UploadPartInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
PartNumber int
|
||||
UploadId string
|
||||
ContentMD5 string
|
||||
SseHeader ISseHeader
|
||||
Body io.Reader
|
||||
SourceFile string
|
||||
Offset int64
|
||||
PartSize int64
|
||||
}
|
||||
|
||||
// UploadPartOutput is the result of UploadPart function
|
||||
type UploadPartOutput struct {
|
||||
BaseModel
|
||||
PartNumber int
|
||||
ETag string
|
||||
SseHeader ISseHeader
|
||||
}
|
||||
|
||||
// CompleteMultipartUploadInput is the input parameter of CompleteMultipartUpload function
|
||||
type CompleteMultipartUploadInput struct {
|
||||
Bucket string `xml:"-"`
|
||||
Key string `xml:"-"`
|
||||
UploadId string `xml:"-"`
|
||||
XMLName xml.Name `xml:"CompleteMultipartUpload"`
|
||||
Parts []Part `xml:"Part"`
|
||||
EncodingType string `xml:"-"`
|
||||
}
|
||||
|
||||
// CompleteMultipartUploadOutput is the result of CompleteMultipartUpload function
|
||||
type CompleteMultipartUploadOutput struct {
|
||||
BaseModel
|
||||
VersionId string `xml:"-"`
|
||||
SseHeader ISseHeader `xml:"-"`
|
||||
XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
|
||||
Location string `xml:"Location"`
|
||||
Bucket string `xml:"Bucket"`
|
||||
Key string `xml:"Key"`
|
||||
ETag string `xml:"ETag"`
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
CallbackBody
|
||||
}
|
||||
|
||||
// ListPartsInput is the input parameter of ListParts function
|
||||
type ListPartsInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
UploadId string
|
||||
MaxParts int
|
||||
PartNumberMarker int
|
||||
EncodingType string
|
||||
}
|
||||
|
||||
// ListPartsOutput is the result of ListParts function
|
||||
type ListPartsOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"ListPartsResult"`
|
||||
Bucket string `xml:"Bucket"`
|
||||
Key string `xml:"Key"`
|
||||
UploadId string `xml:"UploadId"`
|
||||
PartNumberMarker int `xml:"PartNumberMarker"`
|
||||
NextPartNumberMarker int `xml:"NextPartNumberMarker"`
|
||||
MaxParts int `xml:"MaxParts"`
|
||||
IsTruncated bool `xml:"IsTruncated"`
|
||||
StorageClass StorageClassType `xml:"StorageClass"`
|
||||
Initiator Initiator `xml:"Initiator"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Parts []Part `xml:"Part"`
|
||||
EncodingType string `xml:"EncodingType,omitempty"`
|
||||
}
|
||||
|
||||
// CopyPartInput is the input parameter of CopyPart function
|
||||
type CopyPartInput struct {
|
||||
Bucket string
|
||||
Key string
|
||||
UploadId string
|
||||
PartNumber int
|
||||
CopySourceBucket string
|
||||
CopySourceKey string
|
||||
CopySourceVersionId string
|
||||
CopySourceRangeStart int64
|
||||
CopySourceRangeEnd int64
|
||||
SseHeader ISseHeader
|
||||
SourceSseHeader ISseHeader
|
||||
}
|
||||
|
||||
// CopyPartOutput is the result of CopyPart function
|
||||
type CopyPartOutput struct {
|
||||
BaseModel
|
||||
XMLName xml.Name `xml:"CopyPartResult"`
|
||||
PartNumber int `xml:"-"`
|
||||
ETag string `xml:"ETag"`
|
||||
LastModified time.Time `xml:"LastModified"`
|
||||
SseHeader ISseHeader `xml:"-"`
|
||||
}
|
68
myhwoss/obs/model_response.go
Normal file
68
myhwoss/obs/model_response.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// BaseModel defines base model response from OBS
|
||||
type BaseModel struct {
|
||||
StatusCode int `xml:"-"`
|
||||
RequestId string `xml:"RequestId" json:"request_id"`
|
||||
ResponseHeaders map[string][]string `xml:"-"`
|
||||
}
|
||||
|
||||
// Error defines the error property in DeleteObjectsOutput
|
||||
type Error struct {
|
||||
XMLName xml.Name `xml:"Error"`
|
||||
Key string `xml:"Key"`
|
||||
VersionId string `xml:"VersionId"`
|
||||
Code string `xml:"Code"`
|
||||
Message string `xml:"Message"`
|
||||
}
|
||||
|
||||
// FetchResponse defines the response fetch policy configuration
|
||||
type FetchResponse struct {
|
||||
Status FetchPolicyStatusType `json:"status"`
|
||||
Agency string `json:"agency"`
|
||||
}
|
||||
|
||||
// SetBucketFetchJobResponse defines the response SetBucketFetchJob configuration
|
||||
type SetBucketFetchJobResponse struct {
|
||||
ID string `json:"id"`
|
||||
Wait int `json:"Wait"`
|
||||
}
|
||||
|
||||
// GetBucketFetchJobResponse defines the response fetch job configuration
|
||||
type GetBucketFetchJobResponse struct {
|
||||
Err string `json:"err"`
|
||||
Code string `json:"code"`
|
||||
Status string `json:"status"`
|
||||
Job JobResponse `json:"job"`
|
||||
}
|
||||
|
||||
// JobResponse defines the response job configuration
|
||||
type JobResponse struct {
|
||||
Bucket string `json:"bucket"`
|
||||
URL string `json:"url"`
|
||||
Host string `json:"host"`
|
||||
Key string `json:"key"`
|
||||
Md5 string `json:"md5"`
|
||||
CallBackURL string `json:"callbackurl"`
|
||||
CallBackBody string `json:"callbackbody"`
|
||||
CallBackBodyType string `json:"callbackbodytype"`
|
||||
CallBackHost string `json:"callbackhost"`
|
||||
FileType string `json:"file_type"`
|
||||
IgnoreSameKey bool `json:"ignore_same_key"`
|
||||
}
|
542
myhwoss/obs/pool.go
Normal file
542
myhwoss/obs/pool.go
Normal file
@@ -0,0 +1,542 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
//nolint:structcheck, unused
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Future defines interface with function: Get
|
||||
type Future interface {
|
||||
Get() interface{}
|
||||
}
|
||||
|
||||
// FutureResult for task result
|
||||
type FutureResult struct {
|
||||
result interface{}
|
||||
resultChan chan interface{}
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
type panicResult struct {
|
||||
presult interface{}
|
||||
}
|
||||
|
||||
func (f *FutureResult) checkPanic() interface{} {
|
||||
if r, ok := f.result.(panicResult); ok {
|
||||
panic(r.presult)
|
||||
}
|
||||
return f.result
|
||||
}
|
||||
|
||||
// Get gets the task result
|
||||
func (f *FutureResult) Get() interface{} {
|
||||
if f.resultChan == nil {
|
||||
return f.checkPanic()
|
||||
}
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
if f.resultChan == nil {
|
||||
return f.checkPanic()
|
||||
}
|
||||
|
||||
f.result = <-f.resultChan
|
||||
close(f.resultChan)
|
||||
f.resultChan = nil
|
||||
return f.checkPanic()
|
||||
}
|
||||
|
||||
// Task defines interface with function: Run
|
||||
type Task interface {
|
||||
Run() interface{}
|
||||
}
|
||||
|
||||
type funcWrapper struct {
|
||||
f func() interface{}
|
||||
}
|
||||
|
||||
func (fw *funcWrapper) Run() interface{} {
|
||||
if fw.f != nil {
|
||||
return fw.f()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type taskWrapper struct {
|
||||
t Task
|
||||
f *FutureResult
|
||||
}
|
||||
|
||||
func (tw *taskWrapper) Run() interface{} {
|
||||
if tw.t != nil {
|
||||
return tw.t.Run()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type signalTask struct {
|
||||
id string
|
||||
}
|
||||
|
||||
func (signalTask) Run() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
type worker struct {
|
||||
name string
|
||||
taskQueue chan Task
|
||||
wg *sync.WaitGroup
|
||||
pool *RoutinePool
|
||||
}
|
||||
|
||||
func runTask(t Task) {
|
||||
if tw, ok := t.(*taskWrapper); ok {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tw.f.resultChan <- panicResult{
|
||||
presult: r,
|
||||
}
|
||||
}
|
||||
}()
|
||||
ret := t.Run()
|
||||
tw.f.resultChan <- ret
|
||||
} else {
|
||||
t.Run()
|
||||
}
|
||||
}
|
||||
|
||||
func (*worker) runTask(t Task) {
|
||||
runTask(t)
|
||||
}
|
||||
|
||||
func (w *worker) start() {
|
||||
go func() {
|
||||
defer func() {
|
||||
if w.wg != nil {
|
||||
w.wg.Done()
|
||||
}
|
||||
}()
|
||||
for {
|
||||
task, ok := <-w.taskQueue
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
w.pool.AddCurrentWorkingCnt(1)
|
||||
w.runTask(task)
|
||||
w.pool.AddCurrentWorkingCnt(-1)
|
||||
if w.pool.autoTuneWorker(w) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (w *worker) release() {
|
||||
w.taskQueue = nil
|
||||
w.wg = nil
|
||||
w.pool = nil
|
||||
}
|
||||
|
||||
// Pool defines coroutine pool interface
|
||||
type Pool interface {
|
||||
ShutDown()
|
||||
Submit(t Task) (Future, error)
|
||||
SubmitFunc(f func() interface{}) (Future, error)
|
||||
Execute(t Task)
|
||||
ExecuteFunc(f func() interface{})
|
||||
GetMaxWorkerCnt() int64
|
||||
AddMaxWorkerCnt(value int64) int64
|
||||
GetCurrentWorkingCnt() int64
|
||||
AddCurrentWorkingCnt(value int64) int64
|
||||
GetWorkerCnt() int64
|
||||
AddWorkerCnt(value int64) int64
|
||||
EnableAutoTune()
|
||||
}
|
||||
|
||||
type basicPool struct {
|
||||
maxWorkerCnt int64
|
||||
workerCnt int64
|
||||
currentWorkingCnt int64
|
||||
isShutDown int32
|
||||
}
|
||||
|
||||
// ErrTaskInvalid will be returned if the task is nil
|
||||
var ErrTaskInvalid = errors.New("Task is nil")
|
||||
|
||||
func (pool *basicPool) GetCurrentWorkingCnt() int64 {
|
||||
return atomic.LoadInt64(&pool.currentWorkingCnt)
|
||||
}
|
||||
|
||||
func (pool *basicPool) AddCurrentWorkingCnt(value int64) int64 {
|
||||
return atomic.AddInt64(&pool.currentWorkingCnt, value)
|
||||
}
|
||||
|
||||
func (pool *basicPool) GetWorkerCnt() int64 {
|
||||
return atomic.LoadInt64(&pool.workerCnt)
|
||||
}
|
||||
|
||||
func (pool *basicPool) AddWorkerCnt(value int64) int64 {
|
||||
return atomic.AddInt64(&pool.workerCnt, value)
|
||||
}
|
||||
|
||||
func (pool *basicPool) GetMaxWorkerCnt() int64 {
|
||||
return atomic.LoadInt64(&pool.maxWorkerCnt)
|
||||
}
|
||||
|
||||
func (pool *basicPool) AddMaxWorkerCnt(value int64) int64 {
|
||||
return atomic.AddInt64(&pool.maxWorkerCnt, value)
|
||||
}
|
||||
|
||||
func (pool *basicPool) CompareAndSwapCurrentWorkingCnt(oldValue, newValue int64) bool {
|
||||
return atomic.CompareAndSwapInt64(&pool.currentWorkingCnt, oldValue, newValue)
|
||||
}
|
||||
|
||||
func (pool *basicPool) EnableAutoTune() {
|
||||
|
||||
}
|
||||
|
||||
// RoutinePool defines the coroutine pool struct
|
||||
type RoutinePool struct {
|
||||
basicPool
|
||||
taskQueue chan Task
|
||||
dispatchQueue chan Task
|
||||
workers map[string]*worker
|
||||
cacheCnt int
|
||||
wg *sync.WaitGroup
|
||||
lock *sync.Mutex
|
||||
shutDownWg *sync.WaitGroup
|
||||
autoTune int32
|
||||
}
|
||||
|
||||
// ErrSubmitTimeout will be returned if submit task timeout when calling SubmitWithTimeout function
|
||||
var ErrSubmitTimeout = errors.New("Submit task timeout")
|
||||
|
||||
// ErrPoolShutDown will be returned if RoutinePool is shutdown
|
||||
var ErrPoolShutDown = errors.New("RoutinePool is shutdown")
|
||||
|
||||
// ErrTaskReject will be returned if submit task is rejected
|
||||
var ErrTaskReject = errors.New("Submit task is rejected")
|
||||
|
||||
var closeQueue = signalTask{id: "closeQueue"}
|
||||
|
||||
// NewRoutinePool creates a RoutinePool instance
|
||||
func NewRoutinePool(maxWorkerCnt, cacheCnt int) Pool {
|
||||
if maxWorkerCnt <= 0 {
|
||||
maxWorkerCnt = runtime.NumCPU()
|
||||
}
|
||||
|
||||
pool := &RoutinePool{
|
||||
cacheCnt: cacheCnt,
|
||||
wg: new(sync.WaitGroup),
|
||||
lock: new(sync.Mutex),
|
||||
shutDownWg: new(sync.WaitGroup),
|
||||
autoTune: 0,
|
||||
}
|
||||
pool.isShutDown = 0
|
||||
pool.maxWorkerCnt += int64(maxWorkerCnt)
|
||||
if pool.cacheCnt <= 0 {
|
||||
pool.taskQueue = make(chan Task)
|
||||
} else {
|
||||
pool.taskQueue = make(chan Task, pool.cacheCnt)
|
||||
}
|
||||
pool.workers = make(map[string]*worker, pool.maxWorkerCnt)
|
||||
// dispatchQueue must not have length
|
||||
pool.dispatchQueue = make(chan Task)
|
||||
pool.dispatcher()
|
||||
|
||||
return pool
|
||||
}
|
||||
|
||||
// EnableAutoTune sets the autoTune enabled
|
||||
func (pool *RoutinePool) EnableAutoTune() {
|
||||
atomic.StoreInt32(&pool.autoTune, 1)
|
||||
}
|
||||
|
||||
func (pool *RoutinePool) checkStatus(t Task) error {
|
||||
if t == nil {
|
||||
return ErrTaskInvalid
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&pool.isShutDown) == 1 {
|
||||
return ErrPoolShutDown
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pool *RoutinePool) dispatcher() {
|
||||
pool.shutDownWg.Add(1)
|
||||
go func() {
|
||||
for {
|
||||
task, ok := <-pool.dispatchQueue
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if task == closeQueue {
|
||||
close(pool.taskQueue)
|
||||
pool.shutDownWg.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
if pool.GetWorkerCnt() < pool.GetMaxWorkerCnt() {
|
||||
pool.addWorker()
|
||||
}
|
||||
|
||||
pool.taskQueue <- task
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// AddMaxWorkerCnt sets the maxWorkerCnt field's value and returns it
|
||||
func (pool *RoutinePool) AddMaxWorkerCnt(value int64) int64 {
|
||||
if atomic.LoadInt32(&pool.autoTune) == 1 {
|
||||
return pool.basicPool.AddMaxWorkerCnt(value)
|
||||
}
|
||||
return pool.GetMaxWorkerCnt()
|
||||
}
|
||||
|
||||
func (pool *RoutinePool) addWorker() {
|
||||
if atomic.LoadInt32(&pool.autoTune) == 1 {
|
||||
pool.lock.Lock()
|
||||
defer pool.lock.Unlock()
|
||||
}
|
||||
w := &worker{}
|
||||
w.name = fmt.Sprintf("woker-%d", len(pool.workers))
|
||||
w.taskQueue = pool.taskQueue
|
||||
w.wg = pool.wg
|
||||
pool.AddWorkerCnt(1)
|
||||
w.pool = pool
|
||||
pool.workers[w.name] = w
|
||||
pool.wg.Add(1)
|
||||
w.start()
|
||||
}
|
||||
|
||||
func (pool *RoutinePool) autoTuneWorker(w *worker) bool {
|
||||
if atomic.LoadInt32(&pool.autoTune) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if w == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
workerCnt := pool.GetWorkerCnt()
|
||||
maxWorkerCnt := pool.GetMaxWorkerCnt()
|
||||
if workerCnt > maxWorkerCnt && atomic.CompareAndSwapInt64(&pool.workerCnt, workerCnt, workerCnt-1) {
|
||||
pool.lock.Lock()
|
||||
defer pool.lock.Unlock()
|
||||
delete(pool.workers, w.name)
|
||||
w.wg.Done()
|
||||
w.release()
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
|
||||
func (pool *RoutinePool) ExecuteFunc(f func() interface{}) {
|
||||
fw := &funcWrapper{
|
||||
f: f,
|
||||
}
|
||||
pool.Execute(fw)
|
||||
}
|
||||
|
||||
// Execute pushes the specified task to the dispatchQueue
|
||||
func (pool *RoutinePool) Execute(t Task) {
|
||||
if t != nil {
|
||||
pool.dispatchQueue <- t
|
||||
}
|
||||
}
|
||||
|
||||
// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
|
||||
func (pool *RoutinePool) SubmitFunc(f func() interface{}) (Future, error) {
|
||||
fw := &funcWrapper{
|
||||
f: f,
|
||||
}
|
||||
return pool.Submit(fw)
|
||||
}
|
||||
|
||||
// Submit pushes the specified task to the dispatchQueue, and returns the FutureResult and error info
|
||||
func (pool *RoutinePool) Submit(t Task) (Future, error) {
|
||||
if err := pool.checkStatus(t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &FutureResult{}
|
||||
f.resultChan = make(chan interface{}, 1)
|
||||
tw := &taskWrapper{
|
||||
t: t,
|
||||
f: f,
|
||||
}
|
||||
pool.dispatchQueue <- tw
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// SubmitWithTimeout pushes the specified task to the dispatchQueue, and returns the FutureResult and error info.
|
||||
// Also takes a timeout value, will return ErrSubmitTimeout if it does't complete within that time.
|
||||
func (pool *RoutinePool) SubmitWithTimeout(t Task, timeout int64) (Future, error) {
|
||||
if timeout <= 0 {
|
||||
return pool.Submit(t)
|
||||
}
|
||||
if err := pool.checkStatus(t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
timeoutChan := make(chan bool, 1)
|
||||
go func() {
|
||||
time.Sleep(time.Duration(time.Millisecond * time.Duration(timeout)))
|
||||
timeoutChan <- true
|
||||
close(timeoutChan)
|
||||
}()
|
||||
|
||||
f := &FutureResult{}
|
||||
f.resultChan = make(chan interface{}, 1)
|
||||
tw := &taskWrapper{
|
||||
t: t,
|
||||
f: f,
|
||||
}
|
||||
select {
|
||||
case pool.dispatchQueue <- tw:
|
||||
return f, nil
|
||||
case _, ok := <-timeoutChan:
|
||||
if ok {
|
||||
return nil, ErrSubmitTimeout
|
||||
}
|
||||
return nil, ErrSubmitTimeout
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *RoutinePool) beforeCloseDispatchQueue() {
|
||||
if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
|
||||
return
|
||||
}
|
||||
pool.dispatchQueue <- closeQueue
|
||||
pool.wg.Wait()
|
||||
}
|
||||
|
||||
func (pool *RoutinePool) doCloseDispatchQueue() {
|
||||
close(pool.dispatchQueue)
|
||||
pool.shutDownWg.Wait()
|
||||
}
|
||||
|
||||
// ShutDown closes the RoutinePool instance
|
||||
func (pool *RoutinePool) ShutDown() {
|
||||
pool.beforeCloseDispatchQueue()
|
||||
pool.doCloseDispatchQueue()
|
||||
for _, w := range pool.workers {
|
||||
w.release()
|
||||
}
|
||||
pool.workers = nil
|
||||
pool.taskQueue = nil
|
||||
pool.dispatchQueue = nil
|
||||
}
|
||||
|
||||
// NoChanPool defines the coroutine pool struct
|
||||
type NoChanPool struct {
|
||||
basicPool
|
||||
wg *sync.WaitGroup
|
||||
tokens chan interface{}
|
||||
}
|
||||
|
||||
// NewNochanPool creates a new NoChanPool instance
|
||||
func NewNochanPool(maxWorkerCnt int) Pool {
|
||||
if maxWorkerCnt <= 0 {
|
||||
maxWorkerCnt = runtime.NumCPU()
|
||||
}
|
||||
|
||||
pool := &NoChanPool{
|
||||
wg: new(sync.WaitGroup),
|
||||
tokens: make(chan interface{}, maxWorkerCnt),
|
||||
}
|
||||
pool.isShutDown = 0
|
||||
pool.AddMaxWorkerCnt(int64(maxWorkerCnt))
|
||||
|
||||
for i := 0; i < maxWorkerCnt; i++ {
|
||||
pool.tokens <- struct{}{}
|
||||
}
|
||||
|
||||
return pool
|
||||
}
|
||||
|
||||
func (pool *NoChanPool) acquire() {
|
||||
<-pool.tokens
|
||||
}
|
||||
|
||||
func (pool *NoChanPool) release() {
|
||||
pool.tokens <- 1
|
||||
}
|
||||
|
||||
func (pool *NoChanPool) execute(t Task) {
|
||||
pool.wg.Add(1)
|
||||
go func() {
|
||||
pool.acquire()
|
||||
defer func() {
|
||||
pool.release()
|
||||
pool.wg.Done()
|
||||
}()
|
||||
runTask(t)
|
||||
}()
|
||||
}
|
||||
|
||||
// ShutDown closes the NoChanPool instance
|
||||
func (pool *NoChanPool) ShutDown() {
|
||||
if !atomic.CompareAndSwapInt32(&pool.isShutDown, 0, 1) {
|
||||
return
|
||||
}
|
||||
pool.wg.Wait()
|
||||
}
|
||||
|
||||
// Execute executes the specified task
|
||||
func (pool *NoChanPool) Execute(t Task) {
|
||||
if t != nil {
|
||||
pool.execute(t)
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteFunc creates a funcWrapper instance with the specified function and calls the Execute function
|
||||
func (pool *NoChanPool) ExecuteFunc(f func() interface{}) {
|
||||
fw := &funcWrapper{
|
||||
f: f,
|
||||
}
|
||||
pool.Execute(fw)
|
||||
}
|
||||
|
||||
// Submit executes the specified task, and returns the FutureResult and error info
|
||||
func (pool *NoChanPool) Submit(t Task) (Future, error) {
|
||||
if t == nil {
|
||||
return nil, ErrTaskInvalid
|
||||
}
|
||||
|
||||
f := &FutureResult{}
|
||||
f.resultChan = make(chan interface{}, 1)
|
||||
tw := &taskWrapper{
|
||||
t: t,
|
||||
f: f,
|
||||
}
|
||||
|
||||
pool.execute(tw)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// SubmitFunc creates a funcWrapper instance with the specified function and calls the Submit function
|
||||
func (pool *NoChanPool) SubmitFunc(f func() interface{}) (Future, error) {
|
||||
fw := &funcWrapper{
|
||||
f: f,
|
||||
}
|
||||
return pool.Submit(fw)
|
||||
}
|
101
myhwoss/obs/progress.go
Normal file
101
myhwoss/obs/progress.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package obs
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ProgressEventType identifies the kind of transfer progress notification.
type ProgressEventType int

// ProgressEvent describes a snapshot of a transfer delivered to a
// ProgressListener.
type ProgressEvent struct {
	ConsumedBytes int64             // bytes transferred so far
	TotalBytes    int64             // expected total, if known
	EventType     ProgressEventType // which phase this event reports
}
|
||||
|
||||
const (
|
||||
TransferStartedEvent ProgressEventType = 1 + iota
|
||||
TransferDataEvent
|
||||
TransferCompletedEvent
|
||||
TransferFailedEvent
|
||||
)
|
||||
|
||||
func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
|
||||
return &ProgressEvent{
|
||||
ConsumedBytes: consumed,
|
||||
TotalBytes: total,
|
||||
EventType: eventType,
|
||||
}
|
||||
}
|
||||
|
||||
type ProgressListener interface {
|
||||
ProgressChanged(event *ProgressEvent)
|
||||
}
|
||||
|
||||
// readerTracker records how many bytes a wrapped reader has delivered.
type readerTracker struct {
	completedBytes int64 // updated by teeReader.Read after each successful read
}
|
||||
|
||||
// publishProgress
|
||||
func publishProgress(listener ProgressListener, event *ProgressEvent) {
|
||||
if listener != nil && event != nil {
|
||||
listener.ProgressChanged(event)
|
||||
}
|
||||
}
|
||||
|
||||
type teeReader struct {
|
||||
reader io.Reader
|
||||
consumedBytes int64
|
||||
totalBytes int64
|
||||
tracker *readerTracker
|
||||
listener ProgressListener
|
||||
}
|
||||
|
||||
func TeeReader(reader io.Reader, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
|
||||
return &teeReader{
|
||||
reader: reader,
|
||||
consumedBytes: 0,
|
||||
totalBytes: totalBytes,
|
||||
tracker: tracker,
|
||||
listener: listener,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *teeReader) Read(p []byte) (n int, err error) {
|
||||
n, err = t.reader.Read(p)
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
|
||||
publishProgress(t.listener, event)
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
t.consumedBytes += int64(n)
|
||||
|
||||
if t.listener != nil {
|
||||
event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
|
||||
publishProgress(t.listener, event)
|
||||
}
|
||||
|
||||
if t.tracker != nil {
|
||||
t.tracker.completedBytes = t.consumedBytes
|
||||
}
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
event := newProgressEvent(TransferCompletedEvent, t.consumedBytes, t.totalBytes)
|
||||
publishProgress(t.listener, event)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *teeReader) Size() int64 {
|
||||
return r.totalBytes
|
||||
}
|
||||
|
||||
func (t *teeReader) Close() error {
|
||||
if rc, ok := t.reader.(io.ReadCloser); ok {
|
||||
return rc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
242
myhwoss/obs/provider.go
Normal file
242
myhwoss/obs/provider.go
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Environment variable names and the ECS metadata endpoint used to obtain
// temporary credentials.
const (
	accessKeyEnv     = "OBS_ACCESS_KEY_ID"
	securityKeyEnv   = "OBS_SECRET_ACCESS_KEY"
	securityTokenEnv = "OBS_SECURITY_TOKEN"
	ecsRequestURL    = "http://169.254.169.254/openstack/latest/securitykey"
)
|
||||
|
||||
type securityHolder struct {
|
||||
ak string
|
||||
sk string
|
||||
securityToken string
|
||||
}
|
||||
|
||||
var emptySecurityHolder = securityHolder{}
|
||||
|
||||
type securityProvider interface {
|
||||
getSecurity() securityHolder
|
||||
}
|
||||
|
||||
type BasicSecurityProvider struct {
|
||||
val atomic.Value
|
||||
}
|
||||
|
||||
func (bsp *BasicSecurityProvider) getSecurity() securityHolder {
|
||||
if sh, ok := bsp.val.Load().(securityHolder); ok {
|
||||
return sh
|
||||
}
|
||||
return emptySecurityHolder
|
||||
}
|
||||
|
||||
func (bsp *BasicSecurityProvider) refresh(ak, sk, securityToken string) {
|
||||
bsp.val.Store(securityHolder{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)})
|
||||
}
|
||||
|
||||
func NewBasicSecurityProvider(ak, sk, securityToken string) *BasicSecurityProvider {
|
||||
bsp := &BasicSecurityProvider{}
|
||||
bsp.refresh(ak, sk, securityToken)
|
||||
return bsp
|
||||
}
|
||||
|
||||
type EnvSecurityProvider struct {
|
||||
sh securityHolder
|
||||
suffix string
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
func (esp *EnvSecurityProvider) getSecurity() securityHolder {
|
||||
//ensure run only once
|
||||
esp.once.Do(func() {
|
||||
esp.sh = securityHolder{
|
||||
ak: strings.TrimSpace(os.Getenv(accessKeyEnv + esp.suffix)),
|
||||
sk: strings.TrimSpace(os.Getenv(securityKeyEnv + esp.suffix)),
|
||||
securityToken: strings.TrimSpace(os.Getenv(securityTokenEnv + esp.suffix)),
|
||||
}
|
||||
})
|
||||
|
||||
return esp.sh
|
||||
}
|
||||
|
||||
func NewEnvSecurityProvider(suffix string) *EnvSecurityProvider {
|
||||
if suffix != "" {
|
||||
suffix = "_" + suffix
|
||||
}
|
||||
esp := &EnvSecurityProvider{
|
||||
suffix: suffix,
|
||||
}
|
||||
return esp
|
||||
}
|
||||
|
||||
type TemporarySecurityHolder struct {
|
||||
securityHolder
|
||||
expireDate time.Time
|
||||
}
|
||||
|
||||
var emptyTemporarySecurityHolder = TemporarySecurityHolder{}
|
||||
|
||||
// EcsSecurityProvider fetches temporary credentials from the ECS metadata
// service and caches them until shortly before they expire.
type EcsSecurityProvider struct {
	val        atomic.Value // stores a TemporarySecurityHolder
	lock       sync.Mutex   // serializes synchronous refreshes
	httpClient *http.Client
	prefetch   int32 // CAS flag: a background-style prefetch is in flight
	retryCount int   // extra attempts after the first failed fetch
}
|
||||
|
||||
func (ecsSp *EcsSecurityProvider) loadTemporarySecurityHolder() (TemporarySecurityHolder, bool) {
|
||||
if sh := ecsSp.val.Load(); sh == nil {
|
||||
return emptyTemporarySecurityHolder, false
|
||||
} else if _sh, ok := sh.(TemporarySecurityHolder); !ok {
|
||||
return emptyTemporarySecurityHolder, false
|
||||
} else {
|
||||
return _sh, true
|
||||
}
|
||||
}
|
||||
|
||||
func (ecsSp *EcsSecurityProvider) getAndSetSecurityWithOutLock() securityHolder {
|
||||
_sh := TemporarySecurityHolder{}
|
||||
_sh.expireDate = time.Now().Add(time.Minute * 5)
|
||||
retryCount := 0
|
||||
for {
|
||||
if req, err := http.NewRequest("GET", ecsRequestURL, nil); err == nil {
|
||||
start := GetCurrentTimestamp()
|
||||
res, err := ecsSp.httpClient.Do(req)
|
||||
if err == nil {
|
||||
if data, _err := ioutil.ReadAll(res.Body); _err == nil {
|
||||
temp := &struct {
|
||||
Credential struct {
|
||||
AK string `json:"access,omitempty"`
|
||||
SK string `json:"secret,omitempty"`
|
||||
SecurityToken string `json:"securitytoken,omitempty"`
|
||||
ExpireDate time.Time `json:"expires_at,omitempty"`
|
||||
} `json:"credential"`
|
||||
}{}
|
||||
|
||||
doLog(LEVEL_DEBUG, "Get the json data from ecs succeed")
|
||||
|
||||
if jsonErr := json.Unmarshal(data, temp); jsonErr == nil {
|
||||
_sh.ak = temp.Credential.AK
|
||||
_sh.sk = temp.Credential.SK
|
||||
_sh.securityToken = temp.Credential.SecurityToken
|
||||
_sh.expireDate = temp.Credential.ExpireDate.Add(time.Minute * -1)
|
||||
|
||||
doLog(LEVEL_INFO, "Get security from ecs succeed, AK:xxxx, SK:xxxx, SecurityToken:xxxx, ExprireDate %s", _sh.expireDate)
|
||||
|
||||
doLog(LEVEL_INFO, "Get security from ecs succeed, cost %d ms", (GetCurrentTimestamp() - start))
|
||||
break
|
||||
} else {
|
||||
err = jsonErr
|
||||
}
|
||||
} else {
|
||||
err = _err
|
||||
}
|
||||
}
|
||||
|
||||
doLog(LEVEL_WARN, "Try to get security from ecs failed, cost %d ms, err %s", (GetCurrentTimestamp() - start), err.Error())
|
||||
}
|
||||
|
||||
if retryCount >= ecsSp.retryCount {
|
||||
doLog(LEVEL_WARN, "Try to get security from ecs failed and exceed the max retry count")
|
||||
break
|
||||
}
|
||||
sleepTime := float64(retryCount+2) * rand.Float64()
|
||||
if sleepTime > 10 {
|
||||
sleepTime = 10
|
||||
}
|
||||
time.Sleep(time.Duration(sleepTime * float64(time.Second)))
|
||||
retryCount++
|
||||
}
|
||||
|
||||
ecsSp.val.Store(_sh)
|
||||
return _sh.securityHolder
|
||||
}
|
||||
|
||||
func (ecsSp *EcsSecurityProvider) getAndSetSecurity() securityHolder {
|
||||
ecsSp.lock.Lock()
|
||||
defer ecsSp.lock.Unlock()
|
||||
tsh, succeed := ecsSp.loadTemporarySecurityHolder()
|
||||
if !succeed || time.Now().After(tsh.expireDate) {
|
||||
return ecsSp.getAndSetSecurityWithOutLock()
|
||||
}
|
||||
return tsh.securityHolder
|
||||
}
|
||||
|
||||
func (ecsSp *EcsSecurityProvider) getSecurity() securityHolder {
|
||||
if tsh, succeed := ecsSp.loadTemporarySecurityHolder(); succeed {
|
||||
if time.Now().Before(tsh.expireDate) {
|
||||
//not expire
|
||||
if time.Now().Add(time.Minute*5).After(tsh.expireDate) && atomic.CompareAndSwapInt32(&ecsSp.prefetch, 0, 1) {
|
||||
//do prefetch
|
||||
sh := ecsSp.getAndSetSecurityWithOutLock()
|
||||
atomic.CompareAndSwapInt32(&ecsSp.prefetch, 1, 0)
|
||||
return sh
|
||||
}
|
||||
return tsh.securityHolder
|
||||
}
|
||||
return ecsSp.getAndSetSecurity()
|
||||
}
|
||||
|
||||
return ecsSp.getAndSetSecurity()
|
||||
}
|
||||
|
||||
func getInternalTransport() *http.Transport {
|
||||
timeout := 10
|
||||
transport := &http.Transport{
|
||||
Dial: func(network, addr string) (net.Conn, error) {
|
||||
start := GetCurrentTimestamp()
|
||||
conn, err := (&net.Dialer{
|
||||
Timeout: time.Second * time.Duration(timeout),
|
||||
Resolver: net.DefaultResolver,
|
||||
}).Dial(network, addr)
|
||||
|
||||
if isInfoLogEnabled() {
|
||||
doLog(LEVEL_INFO, "Do http dial cost %d ms", (GetCurrentTimestamp() - start))
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return getConnDelegate(conn, timeout, timeout*10), nil
|
||||
},
|
||||
MaxIdleConns: 10,
|
||||
MaxIdleConnsPerHost: 10,
|
||||
ResponseHeaderTimeout: time.Second * time.Duration(timeout),
|
||||
IdleConnTimeout: time.Second * time.Duration(DEFAULT_IDLE_CONN_TIMEOUT),
|
||||
DisableCompression: true,
|
||||
}
|
||||
|
||||
return transport
|
||||
}
|
||||
|
||||
func NewEcsSecurityProvider(retryCount int) *EcsSecurityProvider {
|
||||
ecsSp := &EcsSecurityProvider{
|
||||
retryCount: retryCount,
|
||||
}
|
||||
ecsSp.httpClient = &http.Client{Transport: getInternalTransport(), CheckRedirect: checkRedirectFunc}
|
||||
return ecsSp
|
||||
}
|
65
myhwoss/obs/temporary_createSignedUrl.go
Normal file
65
myhwoss/obs/temporary_createSignedUrl.go
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// CreateSignedUrl creates signed url with the specified CreateSignedUrlInput, and returns the CreateSignedUrlOutput and error
|
||||
func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput, extensions ...extensionOptions) (output *CreateSignedUrlOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("CreateSignedUrlInput is nil")
|
||||
}
|
||||
|
||||
params := make(map[string]string, len(input.QueryParams))
|
||||
for key, value := range input.QueryParams {
|
||||
params[key] = value
|
||||
}
|
||||
|
||||
if input.SubResource != "" {
|
||||
params[string(input.SubResource)] = ""
|
||||
}
|
||||
|
||||
headers := make(map[string][]string, len(input.Headers))
|
||||
for key, value := range input.Headers {
|
||||
headers[key] = []string{value}
|
||||
}
|
||||
|
||||
for _, extension := range extensions {
|
||||
if extensionHeader, ok := extension.(extensionHeaders); ok {
|
||||
_err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_INFO, fmt.Sprintf("set header with error: %v", _err))
|
||||
}
|
||||
} else {
|
||||
doLog(LEVEL_INFO, "Unsupported extensionOptions")
|
||||
}
|
||||
}
|
||||
|
||||
if input.Expires <= 0 {
|
||||
input.Expires = 300
|
||||
}
|
||||
|
||||
requestURL, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, input.Policy, params, headers, int64(input.Expires))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output = &CreateSignedUrlOutput{
|
||||
SignedUrl: requestURL,
|
||||
ActualSignedRequestHeaders: headers,
|
||||
}
|
||||
return
|
||||
}
|
116
myhwoss/obs/temporary_other.go
Normal file
116
myhwoss/obs/temporary_other.go
Normal file
@@ -0,0 +1,116 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (obsClient ObsClient) isSecurityToken(params map[string]string, sh securityHolder) {
|
||||
if sh.securityToken != "" {
|
||||
if obsClient.conf.signature == SignatureObs {
|
||||
params[HEADER_STS_TOKEN_OBS] = sh.securityToken
|
||||
} else {
|
||||
params[HEADER_STS_TOKEN_AMZ] = sh.securityToken
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CreateBrowserBasedSignature gets the browser based signature with the specified CreateBrowserBasedSignatureInput,
|
||||
// and returns the CreateBrowserBasedSignatureOutput and error
|
||||
func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
|
||||
if input == nil {
|
||||
return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
|
||||
}
|
||||
|
||||
params := make(map[string]string, len(input.FormParams))
|
||||
for key, value := range input.FormParams {
|
||||
params[key] = value
|
||||
}
|
||||
|
||||
date := time.Now().UTC()
|
||||
shortDate := date.Format(SHORT_DATE_FORMAT)
|
||||
longDate := date.Format(LONG_DATE_FORMAT)
|
||||
sh := obsClient.getSecurity()
|
||||
|
||||
credential, _ := getCredential(sh.ak, obsClient.conf.region, shortDate)
|
||||
|
||||
if input.Expires <= 0 {
|
||||
input.Expires = 300
|
||||
}
|
||||
|
||||
expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
|
||||
if obsClient.conf.signature == SignatureV4 {
|
||||
params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
|
||||
params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
|
||||
params[PARAM_DATE_AMZ_CAMEL] = longDate
|
||||
}
|
||||
|
||||
obsClient.isSecurityToken(params, sh)
|
||||
|
||||
matchAnyBucket := true
|
||||
matchAnyKey := true
|
||||
count := 5
|
||||
if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
|
||||
params["bucket"] = bucket
|
||||
matchAnyBucket = false
|
||||
count--
|
||||
}
|
||||
|
||||
if key := strings.TrimSpace(input.Key); key != "" {
|
||||
params["key"] = key
|
||||
matchAnyKey = false
|
||||
count--
|
||||
}
|
||||
|
||||
originPolicySlice := make([]string, 0, len(params)+count)
|
||||
originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration))
|
||||
originPolicySlice = append(originPolicySlice, "\"conditions\":[")
|
||||
for key, value := range params {
|
||||
if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" {
|
||||
originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value))
|
||||
}
|
||||
}
|
||||
|
||||
if matchAnyBucket {
|
||||
originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],")
|
||||
}
|
||||
|
||||
if matchAnyKey {
|
||||
originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],")
|
||||
}
|
||||
|
||||
originPolicySlice = append(originPolicySlice, "]}")
|
||||
|
||||
originPolicy := strings.Join(originPolicySlice, "")
|
||||
policy := Base64Encode([]byte(originPolicy))
|
||||
var signature string
|
||||
if obsClient.conf.signature == SignatureV4 {
|
||||
signature = getSignature(policy, sh.sk, obsClient.conf.region, shortDate)
|
||||
} else {
|
||||
signature = Base64Encode(HmacSha1([]byte(sh.sk), []byte(policy)))
|
||||
}
|
||||
|
||||
output = &CreateBrowserBasedSignatureOutput{
|
||||
OriginPolicy: originPolicy,
|
||||
Policy: policy,
|
||||
Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL],
|
||||
Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL],
|
||||
Date: params[PARAM_DATE_AMZ_CAMEL],
|
||||
Signature: signature,
|
||||
}
|
||||
return
|
||||
}
|
758
myhwoss/obs/temporary_signedUrl.go
Normal file
758
myhwoss/obs/temporary_signedUrl.go
Normal file
@@ -0,0 +1,758 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ListBucketsWithSignedUrl lists buckets with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) {
|
||||
output = &ListBucketsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CreateBucketWithSignedUrl creates bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketWithSignedUrl deletes bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketStoragePolicyWithSignedUrl sets bucket storage class with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketStoragePolicyWithSignedUrl gets bucket storage class with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) {
|
||||
output = &GetBucketStoragePolicyOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListObjectsWithSignedUrl lists objects in a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) {
|
||||
output = &ListObjectsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
|
||||
output.Location = location[0]
|
||||
}
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeListObjectsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListObjectsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListVersionsWithSignedUrl lists versioning objects in a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) {
|
||||
output = &ListVersionsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
|
||||
output.Location = location[0]
|
||||
}
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeListVersionsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListVersionsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListMultipartUploadsWithSignedUrl lists the multipart uploads that are initialized but not combined or aborted in a
|
||||
// specified bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) {
|
||||
output = &ListMultipartUploadsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else if output.EncodingType == "url" {
|
||||
err = decodeListMultipartUploadsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListMultipartUploadsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketQuotaWithSignedUrl sets the bucket quota with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketQuotaWithSignedUrl gets the bucket quota with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) {
|
||||
output = &GetBucketQuotaOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HeadBucketWithSignedUrl checks whether a bucket exists with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HeadObjectWithSignedUrl checks whether an object exists with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) HeadObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("HeadObject", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketMetadataWithSignedUrl gets the metadata of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) {
|
||||
output = &GetBucketMetadataOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetBucketMetadataOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketStorageInfoWithSignedUrl gets storage information about a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) {
|
||||
output = &GetBucketStorageInfoOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketLocationWithSignedUrl gets the location of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) {
|
||||
output = &GetBucketLocationOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketAclWithSignedUrl sets the bucket ACL with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketAclWithSignedUrl gets the bucket ACL with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) {
|
||||
output = &GetBucketAclOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketPolicyWithSignedUrl sets the bucket policy with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketPolicyWithSignedUrl gets the bucket policy with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) {
|
||||
output = &GetBucketPolicyOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketPolicyWithSignedUrl deletes the bucket policy with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketCorsWithSignedUrl sets CORS rules for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketCorsWithSignedUrl gets CORS rules of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) {
|
||||
output = &GetBucketCorsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketCorsWithSignedUrl deletes CORS rules of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketVersioningWithSignedUrl sets the versioning status for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketVersioningWithSignedUrl gets the versioning status of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) {
|
||||
output = &GetBucketVersioningOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketWebsiteConfigurationWithSignedUrl sets website hosting for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketWebsiteConfigurationWithSignedUrl gets the website hosting settings of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) {
|
||||
output = &GetBucketWebsiteConfigurationOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketWebsiteConfigurationWithSignedUrl deletes the website hosting settings of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketLoggingConfigurationWithSignedUrl sets the bucket logging with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketLoggingConfigurationWithSignedUrl gets the logging settings of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) {
|
||||
output = &GetBucketLoggingConfigurationOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketLifecycleConfigurationWithSignedUrl sets lifecycle rules for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketLifecycleConfigurationWithSignedUrl gets lifecycle rules of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) {
|
||||
output = &GetBucketLifecycleConfigurationOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycleConfigurationWithSignedUrl deletes lifecycle rules of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketTaggingWithSignedUrl sets bucket tags with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketTaggingWithSignedUrl gets bucket tags with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) {
|
||||
output = &GetBucketTaggingOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketTaggingWithSignedUrl deletes bucket tags with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketNotificationWithSignedUrl sets event notification for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketNotificationWithSignedUrl gets event notification settings of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) {
|
||||
output = &GetBucketNotificationOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteObjectWithSignedUrl deletes an object with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) {
|
||||
output = &DeleteObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseDeleteObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteObjectsWithSignedUrl deletes objects in a batch with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) {
|
||||
output = &DeleteObjectsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else if output.EncodingType == "url" {
|
||||
err = decodeDeleteObjectsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get DeleteObjectsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetObjectAclWithSignedUrl sets ACL for an object with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetObjectAclWithSignedUrl gets the ACL of an object with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) {
|
||||
output = &GetObjectAclOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if versionID, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
|
||||
output.VersionId = versionID[0]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RestoreObjectWithSignedUrl restores an object with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetObjectMetadataWithSignedUrl gets object metadata with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) {
|
||||
output = &GetObjectMetadataOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetObjectMetadataOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetObjectWithSignedUrl downloads object with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) {
|
||||
output = &GetObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL(GET_OBJECT, HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseGetObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PutObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) {
|
||||
output = &PutObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL(PUT_OBJECT, HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParsePutObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PutFileWithSignedUrl uploads a file to the specified bucket with the specified signed url and signed request headers and sourceFile path
|
||||
func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) {
|
||||
var data io.Reader
|
||||
sourceFile = strings.TrimSpace(sourceFile)
|
||||
if sourceFile != "" {
|
||||
fd, _err := os.Open(sourceFile)
|
||||
if _err != nil {
|
||||
err = _err
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
errMsg := fd.Close()
|
||||
if errMsg != nil {
|
||||
doLog(LEVEL_WARN, "Failed to close file with reason: %v", errMsg)
|
||||
}
|
||||
}()
|
||||
|
||||
stat, _err := fd.Stat()
|
||||
if _err != nil {
|
||||
err = _err
|
||||
return nil, err
|
||||
}
|
||||
fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
|
||||
fileReaderWrapper.reader = fd
|
||||
|
||||
var contentLength int64
|
||||
if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok {
|
||||
contentLength = StringToInt64(value[0], -1)
|
||||
} else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok {
|
||||
contentLength = StringToInt64(value[0], -1)
|
||||
} else {
|
||||
contentLength = stat.Size()
|
||||
}
|
||||
if contentLength > stat.Size() {
|
||||
return nil, errors.New("ContentLength is larger than fileSize")
|
||||
}
|
||||
fileReaderWrapper.totalCount = contentLength
|
||||
data = fileReaderWrapper
|
||||
}
|
||||
|
||||
output = &PutObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL(PUT_FILE, HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParsePutObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CopyObjectWithSignedUrl creates a copy for an existing object with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) {
|
||||
output = &CopyObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseCopyObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AbortMultipartUploadWithSignedUrl aborts a multipart upload in a specified bucket by using the multipart upload ID with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InitiateMultipartUploadWithSignedUrl initializes a multipart upload with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) {
|
||||
output = &InitiateMultipartUploadOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseInitiateMultipartUploadOutput(output)
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeInitiateMultipartUploadOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get InitiateMultipartUploadOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UploadPartWithSignedUrl uploads a part to a specified bucket by using a specified multipart upload ID
|
||||
// with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
|
||||
output = &UploadPartOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseUploadPartOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CompleteMultipartUploadWithSignedUrl combines the uploaded parts in a specified bucket by using the multipart upload ID
|
||||
// with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
|
||||
output = &CompleteMultipartUploadOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseCompleteMultipartUploadOutput(output)
|
||||
if output.EncodingType == "url" {
|
||||
err = decodeCompleteMultipartUploadOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get CompleteMultipartUploadOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListPartsWithSignedUrl lists the uploaded parts in a bucket by using the multipart upload ID with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
|
||||
output = &ListPartsOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else if output.EncodingType == "url" {
|
||||
err = decodeListPartsOutput(output)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to get ListPartsOutput with error: %v.", err)
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CopyPartWithSignedUrl copy a part to a specified bucket by using a specified multipart upload ID with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
|
||||
output = &CopyPartOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseCopyPartOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketRequestPaymentWithSignedUrl sets requester-pays setting for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketRequestPayment", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketRequestPaymentWithSignedUrl gets requester-pays setting of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketRequestPaymentOutput, err error) {
|
||||
output = &GetBucketRequestPaymentOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketRequestPayment", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBucketEncryptionWithSignedURL sets bucket encryption setting for a bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) SetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("SetBucketEncryption", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetBucketEncryptionWithSignedURL gets bucket encryption setting of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) GetBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *GetBucketEncryptionOutput, err error) {
|
||||
output = &GetBucketEncryptionOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("GetBucketEncryption", HTTP_GET, signedURL, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteBucketEncryptionWithSignedURL deletes bucket encryption setting of a bucket with the specified signed url and signed request headers
|
||||
func (obsClient ObsClient) DeleteBucketEncryptionWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
|
||||
output = &BaseModel{}
|
||||
err = obsClient.doHTTPWithSignedURL("DeleteBucketEncryption", HTTP_DELETE, signedURL, actualSignedRequestHeaders, nil, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AppendObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) AppendObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *AppendObjectOutput, err error) {
|
||||
output = &AppendObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL(APPEND_OBJECT, HTTP_POST, signedURL, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
if err = ParseAppendObjectOutput(output); err != nil {
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ModifyObjectWithSignedUrl uploads an object to the specified bucket with the specified signed url and signed request headers and data
|
||||
func (obsClient ObsClient) ModifyObjectWithSignedURL(signedURL string, actualSignedRequestHeaders http.Header, data io.Reader) (output *ModifyObjectOutput, err error) {
|
||||
output = &ModifyObjectOutput{}
|
||||
err = obsClient.doHTTPWithSignedURL("ModifyObject", HTTP_PUT, signedURL, actualSignedRequestHeaders, data, output, true)
|
||||
if err != nil {
|
||||
output = nil
|
||||
} else {
|
||||
ParseModifyObjectOutput(output)
|
||||
}
|
||||
return
|
||||
}
|
154
myhwoss/obs/trait_base.go
Normal file
154
myhwoss/obs/trait_base.go
Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// IRepeatable defines interface with function: Reset.
// NOTE(review): presumably lets a consumed request body be rewound for
// retries — confirm against callers before relying on this.
type IRepeatable interface {
	Reset() error
}

// IReadCloser defines interface with function: setReadCloser
type IReadCloser interface {
	setReadCloser(body io.ReadCloser)
}
|
||||
|
||||
func setHeaders(headers map[string][]string, header string, headerValue []string, isObs bool) {
|
||||
if isObs {
|
||||
header = HEADER_PREFIX_OBS + header
|
||||
headers[header] = headerValue
|
||||
} else {
|
||||
header = HEADER_PREFIX + header
|
||||
headers[header] = headerValue
|
||||
}
|
||||
}
|
||||
|
||||
// setHeadersNext stores headerValue in headers under one of two candidate
// keys: header when isObs is true, headerNext otherwise.
func setHeadersNext(headers map[string][]string, header string, headerNext string, headerValue []string, isObs bool) {
	key := headerNext
	if isObs {
		key = header
	}
	headers[key] = headerValue
}
|
||||
|
||||
// IBaseModel defines interface for base response model
type IBaseModel interface {
	// setStatusCode records the HTTP status code of the response.
	setStatusCode(statusCode int)

	// setRequestID records the request ID returned with the response.
	setRequestID(requestID string)

	// setResponseHeaders records the raw response header map.
	setResponseHeaders(responseHeaders map[string][]string)
}

// ISerializable defines interface with function: trans, which converts a
// request model into query parameters, headers and body data.
type ISerializable interface {
	trans(isObs bool) (map[string]string, map[string][]string, interface{}, error)
}
|
||||
|
||||
// DefaultSerializable defines default serializable struct
type DefaultSerializable struct {
	params  map[string]string   // query parameters to send
	headers map[string][]string // request headers to send
	data    interface{}         // request body
}

// trans returns the pre-built params, headers and data unchanged; it never fails.
func (s DefaultSerializable) trans(isObs bool) (map[string]string, map[string][]string, interface{}, error) {
	return s.params, s.headers, s.data, nil
}
|
||||
|
||||
var defaultSerializable = &DefaultSerializable{}
|
||||
|
||||
func newSubResourceSerialV2(subResource SubResourceType, value string) *DefaultSerializable {
|
||||
return &DefaultSerializable{map[string]string{string(subResource): value}, nil, nil}
|
||||
}
|
||||
|
||||
func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable {
|
||||
return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil}
|
||||
}
|
||||
|
||||
func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(subResource): ""}
|
||||
data, err = ConvertRequestToIoReader(input)
|
||||
return
|
||||
}
|
||||
|
||||
// setStatusCode records the HTTP status code of the response.
func (baseModel *BaseModel) setStatusCode(statusCode int) {
	baseModel.StatusCode = statusCode
}
|
||||
|
||||
// setRequestID records the server-assigned request ID of the response.
func (baseModel *BaseModel) setRequestID(requestID string) {
	baseModel.RequestId = requestID
}
|
||||
|
||||
// setResponseHeaders records the raw HTTP response headers.
func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) {
	baseModel.ResponseHeaders = responseHeaders
}
|
||||
|
||||
// GetEncryption gets the Encryption field value from SseKmsHeader
|
||||
func (header SseKmsHeader) GetEncryption() string {
|
||||
if header.Encryption != "" {
|
||||
return header.Encryption
|
||||
}
|
||||
if !header.isObs {
|
||||
return DEFAULT_SSE_KMS_ENCRYPTION
|
||||
}
|
||||
return DEFAULT_SSE_KMS_ENCRYPTION_OBS
|
||||
}
|
||||
|
||||
// GetKey gets the Key field value from SseKmsHeader (the KMS master key ID;
// may be empty, in which case the service default key is used).
func (header SseKmsHeader) GetKey() string {
	return header.Key
}
|
||||
|
||||
// GetEncryption gets the Encryption field value from SseCHeader
|
||||
func (header SseCHeader) GetEncryption() string {
|
||||
if header.Encryption != "" {
|
||||
return header.Encryption
|
||||
}
|
||||
return DEFAULT_SSE_C_ENCRYPTION
|
||||
}
|
||||
|
||||
// GetKey gets the Key field value from SseCHeader (the base64-encoded
// customer-provided encryption key).
func (header SseCHeader) GetKey() string {
	return header.Key
}
|
||||
|
||||
// GetKeyMD5 gets the KeyMD5 field value from SseCHeader
|
||||
func (header SseCHeader) GetKeyMD5() string {
|
||||
if header.KeyMD5 != "" {
|
||||
return header.KeyMD5
|
||||
}
|
||||
|
||||
if ret, err := Base64Decode(header.GetKey()); err == nil {
|
||||
return Base64Md5(ret)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// setSseHeader writes the server-side-encryption request headers derived from
// sseHeader. SSE-C headers (algorithm, key, key MD5) are always honoured;
// SSE-KMS headers are written only when sseCOnly is false. A nil sseHeader is
// a no-op. isObs selects the OBS or standard header prefix.
func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool, isObs bool) {
	if sseHeader != nil {
		if sseCHeader, ok := sseHeader.(SseCHeader); ok {
			setHeaders(headers, HEADER_SSEC_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
			setHeaders(headers, HEADER_SSEC_KEY, []string{sseCHeader.GetKey()}, isObs)
			setHeaders(headers, HEADER_SSEC_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
		} else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok {
			// Propagate the protocol flag so GetEncryption picks the right default.
			sseKmsHeader.isObs = isObs
			setHeaders(headers, HEADER_SSEKMS_ENCRYPTION, []string{sseKmsHeader.GetEncryption()}, isObs)
			if sseKmsHeader.GetKey() != "" {
				// KMS key header names differ between protocols beyond just the prefix.
				setHeadersNext(headers, HEADER_SSEKMS_KEY_OBS, HEADER_SSEKMS_KEY_AMZ, []string{sseKmsHeader.GetKey()}, isObs)
			}
		}
	}
}
|
284
myhwoss/obs/trait_bucket.go
Normal file
284
myhwoss/obs/trait_bucket.go
Normal file
@@ -0,0 +1,284 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// trans converts ListBucketsInput into request params/headers. max-keys and
// marker become query parameters; the location query header is only
// meaningful on the non-OBS protocol, while the bucket-type header is always
// written with the OBS prefix.
func (input ListBucketsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if input.MaxKeys > 0 {
		params["max-keys"] = IntToString(input.MaxKeys)
	}
	if input.Marker != "" {
		params["marker"] = input.Marker
	}
	headers = make(map[string][]string)
	if input.QueryLocation && !isObs {
		setHeaders(headers, HEADER_LOCATION_AMZ, []string{"true"}, isObs)
	}
	if input.BucketType != "" {
		setHeaders(headers, HEADER_BUCKET_TYPE, []string{string(input.BucketType)}, true)
	}
	return
}
|
||||
|
||||
// prepareGrantHeaders writes the ACL grant headers for bucket creation.
// Read/write/ACP/full-control grants honour the protocol prefix; the two
// "delivered" grants are OBS-only and are always written with the OBS prefix.
// Empty grantee IDs are skipped.
func (input CreateBucketInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
	if grantReadID := input.GrantReadId; grantReadID != "" {
		setHeaders(headers, HEADER_GRANT_READ_OBS, []string{grantReadID}, isObs)
	}
	if grantWriteID := input.GrantWriteId; grantWriteID != "" {
		setHeaders(headers, HEADER_GRANT_WRITE_OBS, []string{grantWriteID}, isObs)
	}
	if grantReadAcpID := input.GrantReadAcpId; grantReadAcpID != "" {
		setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{grantReadAcpID}, isObs)
	}
	if grantWriteAcpID := input.GrantWriteAcpId; grantWriteAcpID != "" {
		setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{grantWriteAcpID}, isObs)
	}
	if grantFullControlID := input.GrantFullControlId; grantFullControlID != "" {
		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{grantFullControlID}, isObs)
	}
	if grantReadDeliveredID := input.GrantReadDeliveredId; grantReadDeliveredID != "" {
		setHeaders(headers, HEADER_GRANT_READ_DELIVERED_OBS, []string{grantReadDeliveredID}, true)
	}
	if grantFullControlDeliveredID := input.GrantFullControlDeliveredId; grantFullControlDeliveredID != "" {
		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_DELIVERED_OBS, []string{grantFullControlDeliveredID}, true)
	}
}
|
||||
|
||||
// trans converts CreateBucketInput into the create-bucket request: ACL,
// storage class, enterprise project, AZ redundancy and grant headers, plus an
// optional CreateBucketConfiguration XML body carrying the bucket location.
func (input CreateBucketInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	}
	if storageClass := string(input.StorageClass); storageClass != "" {
		// The S3-compatible protocol uses legacy class names: WARM maps to
		// STANDARD_IA and COLD maps to GLACIER.
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			}
		}
		setHeadersNext(headers, HEADER_STORAGE_CLASS_OBS, HEADER_STORAGE_CLASS, []string{storageClass}, isObs)
	}
	if epid := input.Epid; epid != "" {
		setHeaders(headers, HEADER_EPID_HEADERS, []string{epid}, isObs)
	}
	if availableZone := input.AvailableZone; availableZone != "" {
		setHeaders(headers, HEADER_AZ_REDUNDANCY, []string{availableZone}, isObs)
	}

	input.prepareGrantHeaders(headers, isObs)
	if input.IsFSFileInterface {
		// Parallel file system switch — OBS-prefixed header only.
		setHeaders(headers, headerFSFileInterface, []string{"Enabled"}, true)
	}

	if location := strings.TrimSpace(input.Location); location != "" {
		input.Location = location

		// The two protocols wrap the location in different XML elements.
		xml := make([]string, 0, 3)
		xml = append(xml, "<CreateBucketConfiguration>")
		if isObs {
			xml = append(xml, fmt.Sprintf("<Location>%s</Location>", input.Location))
		} else {
			xml = append(xml, fmt.Sprintf("<LocationConstraint>%s</LocationConstraint>", input.Location))
		}
		xml = append(xml, "</CreateBucketConfiguration>")

		data = strings.Join(xml, "")
	}

	if bucketRedundancy := string(input.BucketRedundancy); bucketRedundancy != "" {
		setHeaders(headers, HEADER_BUCKET_REDUNDANCY, []string{bucketRedundancy}, isObs)
	}
	if input.IsFusionAllowUpgrade {
		setHeaders(headers, HEADER_FUSION_ALLOW_UPGRADE, []string{"true"}, isObs)
	}

	if input.IsRedundancyAllowALT {
		setHeaders(headers, HEADER_FUSION_ALLOW_ALT, []string{"true"}, isObs)
	}

	return
}
|
||||
|
||||
// trans builds the set-default-storage-class request. The two protocols use
// different sub-resources and XML shapes: the S3-compatible one uses the
// storagePolicy sub-resource with legacy class names (STANDARD_IA/GLACIER,
// defaulting to STANDARD), while OBS uses the storageClass sub-resource and
// coerces anything other than WARM/COLD to STANDARD.
func (input SetBucketStoragePolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	xml := make([]string, 0, 1)
	if !isObs {
		storageClass := "STANDARD"
		if input.StorageClass == StorageClassWarm {
			storageClass = string(storageClassStandardIA)
		} else if input.StorageClass == StorageClassCold {
			storageClass = string(storageClassGlacier)
		}
		params = map[string]string{string(SubResourceStoragePolicy): ""}
		xml = append(xml, fmt.Sprintf("<StoragePolicy><DefaultStorageClass>%s</DefaultStorageClass></StoragePolicy>", storageClass))
	} else {
		if input.StorageClass != StorageClassWarm && input.StorageClass != StorageClassCold {
			input.StorageClass = StorageClassStandard
		}
		params = map[string]string{string(SubResourceStorageClass): ""}
		xml = append(xml, fmt.Sprintf("<StorageClass>%s</StorageClass>", input.StorageClass))
	}
	data = strings.Join(xml, "")
	return
}
|
||||
|
||||
// trans serializes SetBucketQuotaInput under the "quota" sub-resource with
// the whole input rendered as the XML request body.
func (input SetBucketQuotaInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceQuota, input)
}
|
||||
|
||||
// trans builds the set-bucket-ACL request under the "acl" sub-resource.
// A canned ACL, when present, is sent as a header; otherwise the full
// AccessControlPolicy is serialized as the XML body.
// NOTE(review): the XML conversion error is deliberately discarded here,
// matching the other ACL paths in this package — confirm this is intended.
func (input SetBucketAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	headers = make(map[string][]string)

	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	} else {
		data, _ = convertBucketACLToXML(input.AccessControlPolicy, false, isObs)
	}
	return
}
|
||||
|
||||
func (input SetBucketPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(SubResourcePolicy): ""}
|
||||
data = strings.NewReader(input.Policy)
|
||||
return
|
||||
}
|
||||
|
||||
// trans builds the set-bucket-CORS request under the "cors" sub-resource.
// The input is serialized to XML and the mandatory Content-MD5 header of that
// body is attached.
func (input SetBucketCorsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceCors): ""}
	data, md5, err := ConvertRequestToIoReaderV2(input)
	if err != nil {
		return
	}
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
|
||||
|
||||
// trans serializes SetBucketVersioningInput under the "versioning"
// sub-resource with the whole input rendered as the XML request body.
func (input SetBucketVersioningInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceVersioning, input)
}
|
||||
|
||||
// trans builds the set-website-configuration request under the "website"
// sub-resource; the configuration is serialized to XML as the request body.
// NOTE(review): the XML conversion error is discarded, as elsewhere in this
// package.
func (input SetBucketWebsiteConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceWebsite): ""}
	data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false)
	return
}
|
||||
|
||||
func (input GetBucketMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
headers = make(map[string][]string)
|
||||
if origin := strings.TrimSpace(input.Origin); origin != "" {
|
||||
headers[HEADER_ORIGIN_CAMEL] = []string{origin}
|
||||
}
|
||||
if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
|
||||
headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// trans builds the set-bucket-logging request under the "logging"
// sub-resource; the logging status is serialized to XML as the body.
// NOTE(review): the conversion error is discarded, as elsewhere in this file.
func (input SetBucketLoggingConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceLogging): ""}
	data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false, isObs)
	return
}
|
||||
|
||||
// trans builds the set-lifecycle request under the "lifecycle" sub-resource.
// The configuration is serialized to XML and its Content-MD5 (required by the
// API for this operation) is attached as a header.
func (input SetBucketLifecycleConfigurationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceLifecycle): ""}
	data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true, isObs)
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
|
||||
|
||||
// trans builds the set-bucket-encryption request under the "encryption"
// sub-resource; the configuration is serialized to XML as the request body.
// NOTE(review): the conversion error is discarded, as elsewhere in this file.
func (input SetBucketEncryptionInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceEncryption): ""}
	data, _ = ConvertEncryptionConfigurationToXml(input.BucketEncryptionConfiguration, false, isObs)
	return
}
|
||||
|
||||
// trans builds the set-bucket-tagging request under the "tagging"
// sub-resource. The tag set is serialized to XML and the mandatory
// Content-MD5 header of that body is attached.
func (input SetBucketTaggingInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceTagging): ""}
	data, md5, err := ConvertRequestToIoReaderV2(input)
	if err != nil {
		return
	}
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
|
||||
|
||||
// trans builds the set-bucket-notification request under the "notification"
// sub-resource; the notification config is serialized to XML as the body.
// NOTE(review): the conversion error is discarded, as elsewhere in this file.
func (input SetBucketNotificationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceNotification): ""}
	data, _ = ConvertNotificationToXml(input.BucketNotification, false, isObs)
	return
}
|
||||
|
||||
// trans serializes SetBucketRequestPaymentInput under the "requestPayment"
// sub-resource with the whole input rendered as the XML request body.
func (input SetBucketRequestPaymentInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceRequestPayment, input)
}
|
||||
|
||||
func (input SetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
contentType, _ := mimeTypes["json"]
|
||||
headers = make(map[string][]string, 2)
|
||||
headers[HEADER_CONTENT_TYPE] = []string{contentType}
|
||||
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
|
||||
data, err = convertFetchPolicyToJSON(input)
|
||||
return
|
||||
}
|
||||
|
||||
func (input GetBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
headers = make(map[string][]string, 1)
|
||||
setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
|
||||
return
|
||||
}
|
||||
|
||||
// trans prepares an OEF delete-fetch-policy request; only the OEF marker
// header is needed.
func (input DeleteBucketFetchPolicyInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string, 1)
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}
|
||||
|
||||
// trans prepares an OEF create-fetch-job request: JSON content type, the OEF
// marker header, and the job description serialized as JSON in the body.
func (input SetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	// A missing "json" entry yields an empty content type; the blank
	// identifier keeps the two-value map lookup explicit.
	contentType, _ := mimeTypes["json"]
	headers = make(map[string][]string, 2)
	headers[HEADER_CONTENT_TYPE] = []string{contentType}
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	data, err = convertFetchJobToJSON(input)
	return
}
|
||||
|
||||
// trans prepares an OEF get-fetch-job request; only the OEF marker header is
// needed.
func (input GetBucketFetchJobInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string, 1)
	setHeaders(headers, headerOefMarker, []string{"yes"}, isObs)
	return
}
|
||||
|
||||
func (input SetBucketMirrorBackToSourceInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(SubResourceMirrorBackToSource): ""}
|
||||
|
||||
contentType, _ := mimeTypes["json"]
|
||||
headers = make(map[string][]string, 1)
|
||||
headers[HEADER_CONTENT_TYPE] = []string{contentType}
|
||||
data = input.Rules
|
||||
return
|
||||
}
|
||||
|
||||
// trans serializes DeleteBucketCustomDomainInput under the "customdomain"
// sub-resource with the whole input rendered as the XML request body.
func (input DeleteBucketCustomDomainInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceCustomDomain, input)
}
|
||||
|
||||
// trans serializes SetBucketCustomDomainInput under the "customdomain"
// sub-resource with the whole input rendered as the XML request body.
func (input SetBucketCustomDomainInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	return trans(SubResourceCustomDomain, input)
}
|
486
myhwoss/obs/trait_object.go
Normal file
486
myhwoss/obs/trait_object.go
Normal file
@@ -0,0 +1,486 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// setReadCloser stores the response body stream on the output; the caller is
// responsible for closing it when done reading.
func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) {
	output.Body = body
}
|
||||
|
||||
// trans converts the shared listing fields into query parameters (prefix,
// delimiter, max-keys, encoding-type) and the optional CORS preflight headers
// (Origin, Access-Control-Request-Headers). Embedding types extend the result
// with their own pagination parameters.
func (input ListObjsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if input.Prefix != "" {
		params["prefix"] = input.Prefix
	}
	if input.Delimiter != "" {
		params["delimiter"] = input.Delimiter
	}
	if input.MaxKeys > 0 {
		params["max-keys"] = IntToString(input.MaxKeys)
	}
	if input.EncodingType != "" {
		params["encoding-type"] = input.EncodingType
	}
	headers = make(map[string][]string)
	if origin := strings.TrimSpace(input.Origin); origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{origin}
	}
	if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
	}
	return
}
|
||||
|
||||
func (input ListObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params, headers, data, err = input.ListObjsInput.trans(isObs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if input.Marker != "" {
|
||||
params["marker"] = input.Marker
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// trans extends the shared ListObjsInput translation with the "versions"
// sub-resource and the key/version-id pagination markers.
func (input ListVersionsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ListObjsInput.trans(isObs)
	if err != nil {
		return
	}
	params[string(SubResourceVersions)] = ""
	if input.KeyMarker != "" {
		params["key-marker"] = input.KeyMarker
	}
	if input.VersionIdMarker != "" {
		params["version-id-marker"] = input.VersionIdMarker
	}
	return
}
|
||||
|
||||
func (input DeleteObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = make(map[string]string)
|
||||
if input.VersionId != "" {
|
||||
params[PARAM_VERSION_ID] = input.VersionId
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// trans builds a multi-object delete under the "delete" sub-resource. Keys
// are URL-escaped in place when the caller requested url encoding-type, and
// the XML body is sent with its Content-MD5 header as the API requires.
func (input DeleteObjectsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceDelete): ""}
	if strings.ToLower(input.EncodingType) == "url" {
		for index, object := range input.Objects {
			input.Objects[index].Key = url.QueryEscape(object.Key)
		}
	}
	data, md5 := convertDeleteObjectsToXML(input)
	headers = map[string][]string{HEADER_MD5_CAMEL: {md5}}
	return
}
|
||||
|
||||
// trans builds the set-object-ACL request under the "acl" sub-resource,
// optionally targeting a specific version. A canned ACL, when present, is
// sent as a header; otherwise the full AccessControlPolicy is serialized as
// the XML body (conversion error discarded, as elsewhere in this package).
func (input SetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceAcl): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	} else {
		data, _ = ConvertAclToXml(input.AccessControlPolicy, false, isObs)
	}
	return
}
|
||||
|
||||
func (input GetObjectAclInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(SubResourceAcl): ""}
|
||||
if input.VersionId != "" {
|
||||
params[PARAM_VERSION_ID] = input.VersionId
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// trans builds the restore-object request under the "restore" sub-resource,
// optionally targeting a specific version. The restore parameters are
// serialized via the generic XML converter on the S3-compatible protocol and
// via the OBS-specific converter otherwise.
func (input RestoreObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = map[string]string{string(SubResourceRestore): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	if !isObs {
		data, err = ConvertRequestToIoReader(input)
	} else {
		data = ConverntObsRestoreToXml(input)
	}
	return
}
|
||||
|
||||
// trans builds a head-object request: the optional versionId query parameter,
// optional CORS preflight headers, and the SSE-C headers (SSE-KMS is not
// applicable when reading metadata, hence sseCOnly=true).
func (input GetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params = make(map[string]string)
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)

	if input.Origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin}
	}

	if input.RequestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader}
	}
	setSseHeader(headers, input.SseHeader, true, isObs)
	return
}
|
||||
|
||||
// prepareContentHeaders copies the standard HTTP content attributes
// (Cache-Control, Content-Disposition/Encoding/Language/Type, Expires) from
// the input into camel-case request headers, skipping empty values.
func (input SetObjectMetadataInput) prepareContentHeaders(headers map[string][]string) {
	if input.CacheControl != "" {
		headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl}
	}
	if input.ContentDisposition != "" {
		headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition}
	}
	if input.ContentEncoding != "" {
		headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
	}
	if input.ContentLanguage != "" {
		headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage}
	}
	if input.ContentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
	}
	// For backward compatibility, Expires takes precedence; fall back to
	// HttpExpires only when Expires is unset.
	if input.Expires != "" {
		headers[HEADER_EXPIRES_CAMEL] = []string{input.Expires}
	} else if input.HttpExpires != "" {
		headers[HEADER_EXPIRES_CAMEL] = []string{input.HttpExpires}
	}
}
|
||||
|
||||
// prepareStorageClass writes the storage-class header, translating WARM/COLD
// to the legacy STANDARD_IA/GLACIER names on the S3-compatible protocol.
// An empty storage class writes nothing.
func (input SetObjectMetadataInput) prepareStorageClass(headers map[string][]string, isObs bool) {
	if storageClass := string(input.StorageClass); storageClass != "" {
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			}
		}
		setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
	}
}
|
||||
|
||||
func (input SetObjectMetadataInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = make(map[string]string)
|
||||
params = map[string]string{string(SubResourceMetadata): ""}
|
||||
if input.VersionId != "" {
|
||||
params[PARAM_VERSION_ID] = input.VersionId
|
||||
}
|
||||
headers = make(map[string][]string)
|
||||
|
||||
if directive := string(input.MetadataDirective); directive != "" {
|
||||
setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(input.MetadataDirective)}, isObs)
|
||||
} else {
|
||||
setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{string(ReplaceNew)}, isObs)
|
||||
}
|
||||
|
||||
input.prepareContentHeaders(headers)
|
||||
if input.WebsiteRedirectLocation != "" {
|
||||
setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)
|
||||
}
|
||||
input.prepareStorageClass(headers, isObs)
|
||||
if input.Metadata != nil {
|
||||
for key, value := range input.Metadata {
|
||||
key = strings.TrimSpace(key)
|
||||
setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// prepareResponseParams copies the response-override fields (cache control,
// content disposition/encoding/language/type, expires) into the matching
// "response-*" query parameters, skipping empty values.
func (input GetObjectInput) prepareResponseParams(params map[string]string) {
	if input.ResponseCacheControl != "" {
		params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl
	}
	if input.ResponseContentDisposition != "" {
		params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition
	}
	if input.ResponseContentEncoding != "" {
		params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding
	}
	if input.ResponseContentLanguage != "" {
		params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage
	}
	if input.ResponseContentType != "" {
		params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType
	}
	if input.ResponseExpires != "" {
		params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires
	}
}
|
||||
|
||||
// trans extends the head-object translation with download-specific options:
// response-override params, image processing, a byte Range header, and the
// conditional (If-*) headers.
func (input GetObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.GetObjectMetadataInput.trans(isObs)
	if err != nil {
		return
	}
	input.prepareResponseParams(params)
	if input.ImageProcess != "" {
		params[PARAM_IMAGE_PROCESS] = input.ImageProcess
	}
	// The Range header is only emitted for a well-formed, non-empty range;
	// RangeEnd must be strictly greater than a non-negative RangeStart.
	if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart {
		headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)}
	}
	if input.AcceptEncoding != "" {
		headers[HEADER_ACCEPT_ENCODING] = []string{input.AcceptEncoding}
	}
	if input.IfMatch != "" {
		headers[HEADER_IF_MATCH] = []string{input.IfMatch}
	}
	if input.IfNoneMatch != "" {
		headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch}
	}
	if !input.IfModifiedSince.IsZero() {
		headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)}
	}
	if !input.IfUnmodifiedSince.IsZero() {
		headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)}
	}
	return
}
|
||||
|
||||
// prepareGrantHeaders writes the per-object ACL grant headers (read,
// read-ACP, write-ACP, full-control), skipping empty grantee IDs. Unlike the
// bucket variant there is no write or "delivered" grant for objects.
func (input ObjectOperationInput) prepareGrantHeaders(headers map[string][]string, isObs bool) {
	if GrantReadID := input.GrantReadId; GrantReadID != "" {
		setHeaders(headers, HEADER_GRANT_READ_OBS, []string{GrantReadID}, isObs)
	}
	if GrantReadAcpID := input.GrantReadAcpId; GrantReadAcpID != "" {
		setHeaders(headers, HEADER_GRANT_READ_ACP_OBS, []string{GrantReadAcpID}, isObs)
	}
	if GrantWriteAcpID := input.GrantWriteAcpId; GrantWriteAcpID != "" {
		setHeaders(headers, HEADER_GRANT_WRITE_ACP_OBS, []string{GrantWriteAcpID}, isObs)
	}
	if GrantFullControlID := input.GrantFullControlId; GrantFullControlID != "" {
		setHeaders(headers, HEADER_GRANT_FULL_CONTROL_OBS, []string{GrantFullControlID}, isObs)
	}
}
|
||||
|
||||
// trans converts the common object-write fields into request headers: canned
// ACL, grants, storage class (with legacy-name mapping on the S3-compatible
// protocol), website redirect, SSE headers, object expiry in days (OBS-only
// header), and prefixed user metadata.
func (input ObjectOperationInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	params = make(map[string]string)
	if acl := string(input.ACL); acl != "" {
		setHeaders(headers, HEADER_ACL, []string{acl}, isObs)
	}
	input.prepareGrantHeaders(headers, isObs)
	if storageClass := string(input.StorageClass); storageClass != "" {
		if !isObs {
			if storageClass == string(StorageClassWarm) {
				storageClass = string(storageClassStandardIA)
			} else if storageClass == string(StorageClassCold) {
				storageClass = string(storageClassGlacier)
			}
		}
		setHeaders(headers, HEADER_STORAGE_CLASS2, []string{storageClass}, isObs)
	}
	if input.WebsiteRedirectLocation != "" {
		setHeaders(headers, HEADER_WEBSITE_REDIRECT_LOCATION, []string{input.WebsiteRedirectLocation}, isObs)

	}
	setSseHeader(headers, input.SseHeader, false, isObs)
	// Expires here is a lifetime in days; the header is OBS-prefixed only.
	if input.Expires != 0 {
		setHeaders(headers, HEADER_EXPIRES, []string{Int64ToString(input.Expires)}, true)
	}
	if input.Metadata != nil {
		for key, value := range input.Metadata {
			key = strings.TrimSpace(key)
			setHeadersNext(headers, HEADER_PREFIX_META_OBS+key, HEADER_PREFIX_META+key, []string{value}, isObs)
		}
	}
	return
}
|
||||
|
||||
// trans extends the common object-write translation with the upload-specific
// content headers (MD5, length, type, encoding, cache control, disposition,
// language, expires). Zero/empty values are skipped.
func (input PutObjectBasicInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.ObjectOperationInput.trans(isObs)
	if err != nil {
		return
	}

	if input.ContentMD5 != "" {
		headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
	}

	// ContentLength of zero is treated as "not set"; the transport layer
	// presumably determines the length itself then — TODO confirm.
	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}
	if input.ContentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
	}
	if input.ContentEncoding != "" {
		headers[HEADER_CONTENT_ENCODING_CAMEL] = []string{input.ContentEncoding}
	}
	if input.CacheControl != "" {
		headers[HEADER_CACHE_CONTROL_CAMEL] = []string{input.CacheControl}
	}
	if input.ContentDisposition != "" {
		headers[HEADER_CONTENT_DISPOSITION_CAMEL] = []string{input.ContentDisposition}
	}
	if input.ContentLanguage != "" {
		headers[HEADER_CONTENT_LANGUAGE_CAMEL] = []string{input.ContentLanguage}
	}
	if input.HttpExpires != "" {
		headers[HEADER_EXPIRES_CAMEL] = []string{input.HttpExpires}
	}
	return
}
|
||||
|
||||
func (input PutObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params, headers, data, err = input.PutObjectBasicInput.trans(isObs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if input.Body != nil {
|
||||
data = input.Body
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// trans builds an append-object request: the basic put translation plus the
// "append" sub-resource and the write position, with the optional body stream
// as the payload.
func (input AppendObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	params, headers, data, err = input.PutObjectBasicInput.trans(isObs)
	if err != nil {
		return
	}
	params[string(SubResourceAppend)] = ""
	params["position"] = strconv.FormatInt(input.Position, 10)
	if input.Body != nil {
		data = input.Body
	}
	return
}
|
||||
|
||||
// trans builds a modify-object (in-place write) request: the "modify"
// sub-resource, the write position, an optional Content-Length header, and
// the optional body stream as the payload.
func (input ModifyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
	headers = make(map[string][]string)
	params = make(map[string]string)
	params[string(SubResourceModify)] = ""
	params["position"] = strconv.FormatInt(input.Position, 10)
	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}

	if input.Body != nil {
		data = input.Body
	}
	return
}
|
||||
|
||||
func (input CopyObjectInput) prepareReplaceHeaders(headers map[string][]string) {
|
||||
if input.CacheControl != "" {
|
||||
headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl}
|
||||
}
|
||||
if input.ContentDisposition != "" {
|
||||
headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition}
|
||||
}
|
||||
if input.ContentEncoding != "" {
|
||||
headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding}
|
||||
}
|
||||
if input.ContentLanguage != "" {
|
||||
headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage}
|
||||
}
|
||||
if input.ContentType != "" {
|
||||
headers[HEADER_CONTENT_TYPE] = []string{input.ContentType}
|
||||
}
|
||||
if input.Expires != "" {
|
||||
headers[HEADER_EXPIRES] = []string{input.Expires}
|
||||
}
|
||||
}
|
||||
|
||||
func (input CopyObjectInput) prepareCopySourceHeaders(headers map[string][]string, isObs bool) {
|
||||
if input.CopySourceIfMatch != "" {
|
||||
setHeaders(headers, HEADER_COPY_SOURCE_IF_MATCH, []string{input.CopySourceIfMatch}, isObs)
|
||||
}
|
||||
if input.CopySourceIfNoneMatch != "" {
|
||||
setHeaders(headers, HEADER_COPY_SOURCE_IF_NONE_MATCH, []string{input.CopySourceIfNoneMatch}, isObs)
|
||||
}
|
||||
if !input.CopySourceIfModifiedSince.IsZero() {
|
||||
setHeaders(headers, HEADER_COPY_SOURCE_IF_MODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}, isObs)
|
||||
}
|
||||
if !input.CopySourceIfUnmodifiedSince.IsZero() {
|
||||
setHeaders(headers, HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE, []string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}, isObs)
|
||||
}
|
||||
}
|
||||
|
||||
func (input CopyObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params, headers, data, err = input.ObjectOperationInput.trans(isObs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var copySource string
|
||||
if input.CopySourceVersionId != "" {
|
||||
copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
|
||||
} else {
|
||||
copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
|
||||
}
|
||||
setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
|
||||
|
||||
if directive := string(input.MetadataDirective); directive != "" {
|
||||
setHeaders(headers, HEADER_METADATA_DIRECTIVE, []string{directive}, isObs)
|
||||
}
|
||||
|
||||
if input.MetadataDirective == ReplaceMetadata {
|
||||
input.prepareReplaceHeaders(headers)
|
||||
}
|
||||
|
||||
input.prepareCopySourceHeaders(headers, isObs)
|
||||
if input.SourceSseHeader != nil {
|
||||
if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
|
||||
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
|
||||
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
|
||||
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
|
||||
}
|
||||
}
|
||||
if input.SuccessActionRedirect != "" {
|
||||
headers[HEADER_SUCCESS_ACTION_REDIRECT] = []string{input.SuccessActionRedirect}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input HeadObjectInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = make(map[string]string)
|
||||
if input.VersionId != "" {
|
||||
params[PARAM_VERSION_ID] = input.VersionId
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input RenameFileInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(SubResourceRename): ""}
|
||||
params["name"] = input.NewObjectKey
|
||||
headers = make(map[string][]string)
|
||||
if requestPayer := string(input.RequestPayer); requestPayer != "" {
|
||||
headers[HEADER_REQUEST_PAYER] = []string{requestPayer}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input RenameFolderInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(SubResourceRename): ""}
|
||||
params["name"] = input.NewObjectKey
|
||||
headers = make(map[string][]string)
|
||||
if requestPayer := string(input.RequestPayer); requestPayer != "" {
|
||||
headers[HEADER_REQUEST_PAYER] = []string{requestPayer}
|
||||
}
|
||||
return
|
||||
}
|
75
myhwoss/obs/trait_other.go
Normal file
75
myhwoss/obs/trait_other.go
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// partSlice is a sortable collection of multipart-upload Parts; it implements
// sort.Interface so parts can be ordered by ascending PartNumber.
type partSlice []Part

// Len returns the number of parts in the slice.
func (parts partSlice) Len() int {
	return len(parts)
}

// Less orders parts by ascending PartNumber.
func (parts partSlice) Less(i, j int) bool {
	return parts[i].PartNumber < parts[j].PartNumber
}

// Swap exchanges the parts at indexes i and j.
func (parts partSlice) Swap(i, j int) {
	parts[i], parts[j] = parts[j], parts[i]
}
|
||||
|
||||
type readerWrapper struct {
|
||||
reader io.Reader
|
||||
mark int64
|
||||
totalCount int64
|
||||
readedCount int64
|
||||
}
|
||||
|
||||
func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) {
|
||||
if r, ok := rw.reader.(*strings.Reader); ok {
|
||||
return r.Seek(offset, whence)
|
||||
} else if r, ok := rw.reader.(*bytes.Reader); ok {
|
||||
return r.Seek(offset, whence)
|
||||
} else if r, ok := rw.reader.(*os.File); ok {
|
||||
return r.Seek(offset, whence)
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (rw *readerWrapper) Read(p []byte) (n int, err error) {
|
||||
if rw.totalCount == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if rw.totalCount > 0 {
|
||||
n, err = rw.reader.Read(p)
|
||||
readedOnce := int64(n)
|
||||
remainCount := rw.totalCount - rw.readedCount
|
||||
if remainCount > readedOnce {
|
||||
rw.readedCount += readedOnce
|
||||
return n, err
|
||||
}
|
||||
rw.readedCount += remainCount
|
||||
return int(remainCount), io.EOF
|
||||
}
|
||||
return rw.reader.Read(p)
|
||||
}
|
||||
|
||||
// fileReaderWrapper is a readerWrapper whose underlying reader comes from a
// file on disk; filePath records the source file's location.
type fileReaderWrapper struct {
	readerWrapper
	filePath string
}
|
122
myhwoss/obs/trait_part.go
Normal file
122
myhwoss/obs/trait_part.go
Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func (input ListMultipartUploadsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{string(SubResourceUploads): ""}
|
||||
if input.Prefix != "" {
|
||||
params["prefix"] = input.Prefix
|
||||
}
|
||||
if input.Delimiter != "" {
|
||||
params["delimiter"] = input.Delimiter
|
||||
}
|
||||
if input.MaxUploads > 0 {
|
||||
params["max-uploads"] = IntToString(input.MaxUploads)
|
||||
}
|
||||
if input.KeyMarker != "" {
|
||||
params["key-marker"] = input.KeyMarker
|
||||
}
|
||||
if input.UploadIdMarker != "" {
|
||||
params["upload-id-marker"] = input.UploadIdMarker
|
||||
}
|
||||
if input.EncodingType != "" {
|
||||
params["encoding-type"] = input.EncodingType
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input AbortMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{"uploadId": input.UploadId}
|
||||
return
|
||||
}
|
||||
|
||||
func (input InitiateMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params, headers, data, err = input.ObjectOperationInput.trans(isObs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if input.ContentType != "" {
|
||||
headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
|
||||
}
|
||||
params[string(SubResourceUploads)] = ""
|
||||
if input.EncodingType != "" {
|
||||
params["encoding-type"] = input.EncodingType
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input UploadPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
|
||||
headers = make(map[string][]string)
|
||||
setSseHeader(headers, input.SseHeader, true, isObs)
|
||||
if input.ContentMD5 != "" {
|
||||
headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
|
||||
}
|
||||
if input.Body != nil {
|
||||
data = input.Body
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input CompleteMultipartUploadInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{"uploadId": input.UploadId}
|
||||
if input.EncodingType != "" {
|
||||
params["encoding-type"] = input.EncodingType
|
||||
}
|
||||
data, _ = ConvertCompleteMultipartUploadInputToXml(input, false)
|
||||
return
|
||||
}
|
||||
|
||||
func (input ListPartsInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{"uploadId": input.UploadId}
|
||||
if input.MaxParts > 0 {
|
||||
params["max-parts"] = IntToString(input.MaxParts)
|
||||
}
|
||||
if input.PartNumberMarker > 0 {
|
||||
params["part-number-marker"] = IntToString(input.PartNumberMarker)
|
||||
}
|
||||
if input.EncodingType != "" {
|
||||
params["encoding-type"] = input.EncodingType
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (input CopyPartInput) trans(isObs bool) (params map[string]string, headers map[string][]string, data interface{}, err error) {
|
||||
params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
|
||||
headers = make(map[string][]string, 1)
|
||||
var copySource string
|
||||
if input.CopySourceVersionId != "" {
|
||||
copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false), input.CopySourceVersionId)
|
||||
} else {
|
||||
copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, UrlEncode(input.CopySourceKey, false))
|
||||
}
|
||||
setHeaders(headers, HEADER_COPY_SOURCE, []string{copySource}, isObs)
|
||||
if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart {
|
||||
setHeaders(headers, HEADER_COPY_SOURCE_RANGE, []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}, isObs)
|
||||
}
|
||||
|
||||
setSseHeader(headers, input.SseHeader, true, isObs)
|
||||
if input.SourceSseHeader != nil {
|
||||
if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
|
||||
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_ENCRYPTION, []string{sseCHeader.GetEncryption()}, isObs)
|
||||
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY, []string{sseCHeader.GetKey()}, isObs)
|
||||
setHeaders(headers, HEADER_SSEC_COPY_SOURCE_KEY_MD5, []string{sseCHeader.GetKeyMD5()}, isObs)
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
915
myhwoss/obs/transfer.go
Normal file
915
myhwoss/obs/transfer.go
Normal file
@@ -0,0 +1,915 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// errAbort is the sentinel result a part task returns when the shared abort
// flag is already set; it marks "skipped", not a real failure, and is
// filtered out when collecting task results.
var errAbort = errors.New("AbortError")
|
||||
|
||||
// FileStatus defines the upload file properties (size and modification
// time), recorded in the checkpoint so a later run can detect whether the
// source file changed between attempts.
type FileStatus struct {
	XMLName      xml.Name `xml:"FileInfo"`
	LastModified int64    `xml:"LastModified"` // source file mtime in Unix seconds
	Size         int64    `xml:"Size"`         // source file size in bytes
}
|
||||
|
||||
// UploadPartInfo defines the upload part properties: one slice of the source
// file plus its completion state, persisted per part in the checkpoint.
type UploadPartInfo struct {
	XMLName     xml.Name `xml:"UploadPart"`
	PartNumber  int      `xml:"PartNumber"`  // 1-based part index
	Etag        string   `xml:"Etag"`        // ETag returned by the server for this part
	PartSize    int64    `xml:"PartSize"`    // number of bytes in this part
	Offset      int64    `xml:"Offset"`      // byte offset of this part in the source file
	IsCompleted bool     `xml:"IsCompleted"` // true once the part was uploaded successfully
}
|
||||
|
||||
// UploadCheckpoint defines the upload checkpoint file properties: everything
// needed to resume an interrupted multipart upload of a local file.
type UploadCheckpoint struct {
	XMLName     xml.Name         `xml:"UploadFileCheckpoint"`
	Bucket      string           `xml:"Bucket"`             // target bucket
	Key         string           `xml:"Key"`                // target object key
	UploadId    string           `xml:"UploadId,omitempty"` // multipart upload ID issued by the server
	UploadFile  string           `xml:"FileUrl"`            // local source file path
	FileInfo    FileStatus       `xml:"FileInfo"`           // source file fingerprint (size + mtime)
	UploadParts []UploadPartInfo `xml:"UploadParts>UploadPart"` // per-part progress records
}
|
||||
|
||||
func (ufc *UploadCheckpoint) isValid(bucket, key, uploadFile string, fileStat os.FileInfo) bool {
|
||||
if ufc.Bucket != bucket || ufc.Key != key || ufc.UploadFile != uploadFile {
|
||||
doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or uploadFile was changed. clear the record.")
|
||||
return false
|
||||
}
|
||||
|
||||
if ufc.FileInfo.Size != fileStat.Size() || ufc.FileInfo.LastModified != fileStat.ModTime().Unix() {
|
||||
doLog(LEVEL_INFO, "Checkpoint file is invalid, the uploadFile was changed. clear the record.")
|
||||
return false
|
||||
}
|
||||
|
||||
if ufc.UploadId == "" {
|
||||
doLog(LEVEL_INFO, "UploadId is invalid. clear the record.")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// uploadPartTask bundles everything one routine-pool worker needs to upload
// a single part: the part description plus shared client, abort flag,
// request extensions and checkpoint mode.
type uploadPartTask struct {
	UploadPartInput
	obsClient        *ObsClient
	abort            *int32 // shared flag; set to 1 to make sibling tasks stop
	extensions       []extensionOptions
	enableCheckpoint bool
}
|
||||
|
||||
// Run executes one part upload. It returns a *UploadPartOutput on success,
// an error on failure, or the errAbort sentinel when the shared abort flag
// is already set.
func (task *uploadPartTask) Run() interface{} {
	// Another task already failed fatally; skip the work.
	if atomic.LoadInt32(task.abort) == 1 {
		return errAbort
	}

	// Build a fresh input carrying only the fields UploadPart needs.
	input := &UploadPartInput{}
	input.Bucket = task.Bucket
	input.Key = task.Key
	input.PartNumber = task.PartNumber
	input.UploadId = task.UploadId
	input.SseHeader = task.SseHeader
	input.SourceFile = task.SourceFile
	input.Offset = task.Offset
	input.PartSize = task.PartSize
	extensions := task.extensions

	var output *UploadPartOutput
	var err error
	if len(extensions) != 0 {
		output, err = task.obsClient.UploadPart(input, extensions...)
	} else {
		output, err = task.obsClient.UploadPart(input)
	}

	if err == nil {
		if output.ETag == "" {
			doLog(LEVEL_WARN, "Get invalid etag value after uploading part [%d].", task.PartNumber)
			// Without a checkpoint there is nothing to resume from later, so
			// an unverifiable part aborts the whole transfer.
			if !task.enableCheckpoint {
				atomic.CompareAndSwapInt32(task.abort, 0, 1)
				doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
			}
			return fmt.Errorf("get invalid etag value after uploading part [%d]", task.PartNumber)
		}
		return output
	} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
		// 4xx responses are client errors that retrying other parts cannot
		// fix, so flag the transfer as aborted.
		atomic.CompareAndSwapInt32(task.abort, 0, 1)
		doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.PartNumber)
	}
	return err
}
|
||||
|
||||
func loadCheckpointFile(checkpointFile string, result interface{}) error {
|
||||
ret, err := ioutil.ReadFile(checkpointFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(ret) == 0 {
|
||||
return nil
|
||||
}
|
||||
return xml.Unmarshal(ret, result)
|
||||
}
|
||||
|
||||
func updateCheckpointFile(fc interface{}, checkpointFilePath string) error {
|
||||
result, err := xml.Marshal(fc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(checkpointFilePath, result, 0666)
|
||||
return err
|
||||
}
|
||||
|
||||
// getCheckpointFile loads an existing upload checkpoint into ufc and decides
// whether a brand-new checkpoint is required. It returns needCheckpoint=true
// when no usable record exists (file missing, unreadable, or stale); in the
// stale case the old server-side upload is aborted (best effort) and the
// stale file removed first. An error is returned only when the checkpoint
// path is unusable (it is a directory).
func getCheckpointFile(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) (needCheckpoint bool, err error) {
	checkpointFilePath := input.CheckpointFile
	checkpointFileStat, err := os.Stat(checkpointFilePath)
	if err != nil {
		// No checkpoint file yet: start a fresh upload.
		doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
		return true, nil
	}
	if checkpointFileStat.IsDir() {
		doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
		return false, errors.New("checkpoint file can not be a folder")
	}
	err = loadCheckpointFile(checkpointFilePath, ufc)
	if err != nil {
		// Corrupt record: ignore it and start over.
		doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
		return true, nil
	} else if !ufc.isValid(input.Bucket, input.Key, input.UploadFile, uploadFileStat) {
		// Stale record: abort the old multipart upload (best effort) and
		// remove the checkpoint file before starting over.
		if ufc.Bucket != "" && ufc.Key != "" && ufc.UploadId != "" {
			_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to abort upload task [%s].", ufc.UploadId)
			}
		}
		_err := os.Remove(checkpointFilePath)
		if _err != nil {
			doLog(LEVEL_WARN, fmt.Sprintf("Failed to remove checkpoint file with error: [%v].", _err))
		}
	} else {
		// Valid record loaded into ufc: resume with it.
		return false, nil
	}

	return true, nil
}
|
||||
|
||||
// prepareUpload initiates a new multipart upload on the server and fills ufc
// with the upload ID, the source-file fingerprint, and the part layout
// computed by sliceFile.
func prepareUpload(ufc *UploadCheckpoint, uploadFileStat os.FileInfo, input *UploadFileInput, obsClient *ObsClient, extensions []extensionOptions) error {
	initiateInput := &InitiateMultipartUploadInput{}
	initiateInput.ObjectOperationInput = input.ObjectOperationInput
	initiateInput.ContentType = input.ContentType
	initiateInput.EncodingType = input.EncodingType
	var output *InitiateMultipartUploadOutput
	var err error
	if len(extensions) != 0 {
		output, err = obsClient.InitiateMultipartUpload(initiateInput, extensions...)
	} else {
		output, err = obsClient.InitiateMultipartUpload(initiateInput)
	}
	if err != nil {
		return err
	}

	// Record what is being uploaded so a later run can validate this checkpoint.
	ufc.Bucket = input.Bucket
	ufc.Key = input.Key
	ufc.UploadFile = input.UploadFile
	ufc.FileInfo = FileStatus{}
	ufc.FileInfo.Size = uploadFileStat.Size()
	ufc.FileInfo.LastModified = uploadFileStat.ModTime().Unix()
	ufc.UploadId = output.UploadId

	err = sliceFile(input.PartSize, ufc)
	return err
}
|
||||
|
||||
// sliceFile splits the source file described by ufc.FileInfo into parts of
// roughly partSize bytes and stores the layout in ufc.UploadParts. When the
// requested size would yield 10000 or more parts, the part size is enlarged
// so the count stays within the 10000-part limit; an error is returned if
// that would push a part past MAX_PART_SIZE.
func sliceFile(partSize int64, ufc *UploadCheckpoint) error {
	fileSize := ufc.FileInfo.Size
	cnt := fileSize / partSize
	if cnt >= 10000 {
		// Too many parts: grow partSize (rounding up) so at most 10000 parts
		// are needed.
		partSize = fileSize / 10000
		if fileSize%10000 != 0 {
			partSize++
		}
		cnt = fileSize / partSize
	}
	if fileSize%partSize != 0 {
		// Trailing partial part.
		cnt++
	}

	if partSize > MAX_PART_SIZE {
		doLog(LEVEL_ERROR, "The source upload file is too large")
		return fmt.Errorf("The source upload file is too large")
	}

	if cnt == 0 {
		// Empty file: a single zero-length part keeps the upload flow uniform.
		uploadPart := UploadPartInfo{}
		uploadPart.PartNumber = 1
		ufc.UploadParts = []UploadPartInfo{uploadPart}
	} else {
		uploadParts := make([]UploadPartInfo, 0, cnt)
		var i int64
		for i = 0; i < cnt; i++ {
			uploadPart := UploadPartInfo{}
			uploadPart.PartNumber = int(i) + 1
			uploadPart.PartSize = partSize
			uploadPart.Offset = i * partSize
			uploadParts = append(uploadParts, uploadPart)
		}
		// The final part carries only the leftover bytes.
		if value := fileSize % partSize; value != 0 {
			uploadParts[cnt-1].PartSize = value
		}
		ufc.UploadParts = uploadParts
	}
	return nil
}
|
||||
|
||||
func abortTask(bucket, key, uploadID string, obsClient *ObsClient, extensions []extensionOptions) error {
|
||||
input := &AbortMultipartUploadInput{}
|
||||
input.Bucket = bucket
|
||||
input.Key = key
|
||||
input.UploadId = uploadID
|
||||
if len(extensions) != 0 {
|
||||
_, err := obsClient.AbortMultipartUpload(input, extensions...)
|
||||
return err
|
||||
}
|
||||
_, err := obsClient.AbortMultipartUpload(input)
|
||||
return err
|
||||
}
|
||||
|
||||
func handleUploadFileResult(uploadPartError error, ufc *UploadCheckpoint, enableCheckpoint bool, obsClient *ObsClient, extensions []extensionOptions) error {
|
||||
if uploadPartError != nil {
|
||||
if enableCheckpoint {
|
||||
return uploadPartError
|
||||
}
|
||||
_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
|
||||
}
|
||||
return uploadPartError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// completeParts finishes the multipart upload described by ufc by sending a
// CompleteMultipartUpload request built from the recorded part ETags. On
// success the checkpoint file is removed (when checkpointing is enabled); on
// failure without checkpointing the upload is aborted so no orphan parts
// remain on the server.
func completeParts(ufc *UploadCheckpoint, enableCheckpoint bool, checkpointFilePath string, obsClient *ObsClient, encodingType string, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	completeInput := &CompleteMultipartUploadInput{}
	completeInput.Bucket = ufc.Bucket
	completeInput.Key = ufc.Key
	completeInput.UploadId = ufc.UploadId
	completeInput.EncodingType = encodingType
	// Build the part manifest from the checkpoint records.
	parts := make([]Part, 0, len(ufc.UploadParts))
	for _, uploadPart := range ufc.UploadParts {
		part := Part{}
		part.PartNumber = uploadPart.PartNumber
		part.ETag = uploadPart.Etag
		parts = append(parts, part)
	}
	completeInput.Parts = parts
	var completeOutput *CompleteMultipartUploadOutput
	if len(extensions) != 0 {
		completeOutput, err = obsClient.CompleteMultipartUpload(completeInput, extensions...)
	} else {
		completeOutput, err = obsClient.CompleteMultipartUpload(completeInput)
	}

	if err == nil {
		if enableCheckpoint {
			// The upload is done; the checkpoint record is no longer needed.
			_err := os.Remove(checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_WARN, "Upload file successfully, but remove checkpoint file failed with error [%v].", _err)
			}
		}
		return completeOutput, err
	}
	if !enableCheckpoint {
		// No checkpoint to resume from later, so clean up server-side parts.
		_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, obsClient, extensions)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
		}
	}
	return completeOutput, err
}
|
||||
|
||||
// resumeUpload performs an (optionally resumable) multipart file upload: it
// validates the source file, loads or creates a checkpoint, uploads all
// pending parts concurrently, and completes the multipart upload.
func (obsClient ObsClient) resumeUpload(input *UploadFileInput, extensions []extensionOptions) (output *CompleteMultipartUploadOutput, err error) {
	uploadFileStat, err := os.Stat(input.UploadFile)
	if err != nil {
		doLog(LEVEL_ERROR, fmt.Sprintf("Failed to stat uploadFile with error: [%v].", err))
		return nil, err
	}
	if uploadFileStat.IsDir() {
		doLog(LEVEL_ERROR, "UploadFile can not be a folder.")
		return nil, errors.New("uploadFile can not be a folder")
	}

	ufc := &UploadCheckpoint{}

	var needCheckpoint = true
	var checkpointFilePath = input.CheckpointFile
	var enableCheckpoint = input.EnableCheckpoint
	if enableCheckpoint {
		// Try to resume from a previous run's record.
		needCheckpoint, err = getCheckpointFile(ufc, uploadFileStat, input, &obsClient, extensions)
		if err != nil {
			return nil, err
		}
	}
	if needCheckpoint {
		// Fresh upload: initiate it on the server and slice the file into parts.
		err = prepareUpload(ufc, uploadFileStat, input, &obsClient, extensions)
		if err != nil {
			return nil, err
		}

		if enableCheckpoint {
			err = updateCheckpointFile(ufc, checkpointFilePath)
			if err != nil {
				doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", err)
				// The record could not be persisted, so the freshly initiated
				// upload would leak; abort it (best effort) before giving up.
				_err := abortTask(ufc.Bucket, ufc.Key, ufc.UploadId, &obsClient, extensions)
				if _err != nil {
					doLog(LEVEL_WARN, "Failed to abort task [%s].", ufc.UploadId)
				}
				return nil, err
			}
		}
	}

	uploadPartError := obsClient.uploadPartConcurrent(ufc, checkpointFilePath, input, extensions)
	err = handleUploadFileResult(uploadPartError, ufc, enableCheckpoint, &obsClient, extensions)
	if err != nil {
		return nil, err
	}

	completeOutput, err := completeParts(ufc, enableCheckpoint, checkpointFilePath, &obsClient, input.EncodingType, extensions)

	return completeOutput, err
}
|
||||
|
||||
// handleUploadTaskResult folds one part-task result into the shared upload
// state: on success it records the ETag, marks the part completed, advances
// the progress counter, publishes a progress event and persists the
// checkpoint; a non-output result other than the errAbort sentinel is
// surfaced as an error.
func handleUploadTaskResult(result interface{}, ufc *UploadCheckpoint, partNum int, enableCheckpoint bool, checkpointFilePath string, lock *sync.Mutex, completedBytes *int64, listener ProgressListener) (err error) {
	if uploadPartOutput, ok := result.(*UploadPartOutput); ok {
		// The lock serializes checkpoint mutation and persistence across
		// concurrent part tasks.
		lock.Lock()
		defer lock.Unlock()
		ufc.UploadParts[partNum-1].Etag = uploadPartOutput.ETag
		ufc.UploadParts[partNum-1].IsCompleted = true

		atomic.AddInt64(completedBytes, ufc.UploadParts[partNum-1].PartSize)

		event := newProgressEvent(TransferDataEvent, *completedBytes, ufc.FileInfo.Size)
		publishProgress(listener, event)

		if enableCheckpoint {
			// A failed persist is tolerated: the part is already on the
			// server and will simply be re-recorded next time.
			_err := updateCheckpointFile(ufc, checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
			}
		}
	} else if result != errAbort {
		// errAbort only means "task skipped"; anything else is a real error.
		if _err, ok := result.(error); ok {
			err = _err
		}
	}
	return
}
|
||||
|
||||
// uploadPartConcurrent uploads every not-yet-completed part listed in ufc
// through a bounded routine pool, publishing transfer progress events along
// the way, and returns the first error recorded by any part task (nil when
// all parts succeed).
func (obsClient ObsClient) uploadPartConcurrent(ufc *UploadCheckpoint, checkpointFilePath string, input *UploadFileInput, extensions []extensionOptions) error {
	pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
	var uploadPartError atomic.Value
	var errFlag int32
	var abort int32
	lock := new(sync.Mutex)

	var completedBytes int64
	listener := obsClient.getProgressListener(extensions)
	totalBytes := ufc.FileInfo.Size
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)

	for _, uploadPart := range ufc.UploadParts {
		// Stop scheduling new parts once any task has flagged an abort.
		if atomic.LoadInt32(&abort) == 1 {
			break
		}
		// Parts finished in a previous run only contribute to progress.
		if uploadPart.IsCompleted {
			atomic.AddInt64(&completedBytes, uploadPart.PartSize)
			event := newProgressEvent(TransferDataEvent, completedBytes, ufc.FileInfo.Size)
			publishProgress(listener, event)
			continue
		}
		task := uploadPartTask{
			UploadPartInput: UploadPartInput{
				Bucket:     ufc.Bucket,
				Key:        ufc.Key,
				PartNumber: uploadPart.PartNumber,
				UploadId:   ufc.UploadId,
				SseHeader:  input.SseHeader,
				SourceFile: input.UploadFile,
				Offset:     uploadPart.Offset,
				PartSize:   uploadPart.PartSize,
			},
			obsClient:        &obsClient,
			abort:            &abort,
			extensions:       extensions,
			enableCheckpoint: input.EnableCheckpoint,
		}
		pool.ExecuteFunc(func() interface{} {
			result := task.Run()
			err := handleUploadTaskResult(result, ufc, task.PartNumber, input.EnableCheckpoint, input.CheckpointFile, lock, &completedBytes, listener)
			// Only the first error is kept; errFlag guards the single Store.
			if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
				uploadPartError.Store(err)
			}
			return nil
		})
	}
	// Wait for all scheduled part tasks to finish.
	pool.ShutDown()
	if err, ok := uploadPartError.Load().(error); ok {

		event := newProgressEvent(TransferFailedEvent, completedBytes, ufc.FileInfo.Size)
		publishProgress(listener, event)

		return err
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, ufc.FileInfo.Size)
	publishProgress(listener, event)
	return nil
}
|
||||
|
||||
// ObjectInfo defines download object info: the remote object's fingerprint
// (mtime, size, ETag) recorded in the checkpoint so a later run can detect
// whether the object changed on the server.
type ObjectInfo struct {
	XMLName      xml.Name `xml:"ObjectInfo"`
	LastModified int64    `xml:"LastModified"` // object mtime in Unix seconds
	Size         int64    `xml:"Size"`         // object size in bytes
	ETag         string   `xml:"ETag"`         // object ETag as reported by the server
}
|
||||
|
||||
// TempFileInfo defines temp download file properties: the scratch file the
// downloaded ranges are written into before the final rename.
type TempFileInfo struct {
	XMLName     xml.Name `xml:"TempFileInfo"`
	TempFileUrl string   `xml:"TempFileUrl"` // path of the temp file on disk
	Size        int64    `xml:"Size"`        // expected size (equals the object's ContentLength)
}
|
||||
|
||||
// DownloadPartInfo defines download part properties: one byte range of the
// object plus its completion state, persisted per part in the checkpoint.
type DownloadPartInfo struct {
	XMLName     xml.Name `xml:"DownloadPart"`
	PartNumber  int64    `xml:"PartNumber"`  // 1-based part index
	RangeEnd    int64    `xml:"RangeEnd"`    // inclusive end offset of this range
	Offset      int64    `xml:"Offset"`      // start offset of this range
	IsCompleted bool     `xml:"IsCompleted"` // true once the range was downloaded and written
}
|
||||
|
||||
// DownloadCheckpoint defines download checkpoint file properties: everything
// needed to resume an interrupted ranged download of an object.
type DownloadCheckpoint struct {
	XMLName       xml.Name           `xml:"DownloadFileCheckpoint"`
	Bucket        string             `xml:"Bucket"`              // source bucket
	Key           string             `xml:"Key"`                 // source object key
	VersionId     string             `xml:"VersionId,omitempty"` // optional object version
	DownloadFile  string             `xml:"FileUrl"`             // final local destination path
	ObjectInfo    ObjectInfo         `xml:"ObjectInfo"`          // remote object fingerprint
	TempFileInfo  TempFileInfo       `xml:"TempFileInfo"`        // scratch file the ranges are written into
	DownloadParts []DownloadPartInfo `xml:"DownloadParts>DownloadPart"` // per-range progress records
}
|
||||
|
||||
func (dfc *DownloadCheckpoint) isValid(input *DownloadFileInput, output *GetObjectMetadataOutput) bool {
|
||||
if dfc.Bucket != input.Bucket || dfc.Key != input.Key || dfc.VersionId != input.VersionId || dfc.DownloadFile != input.DownloadFile {
|
||||
doLog(LEVEL_INFO, "Checkpoint file is invalid, the bucketName or objectKey or downloadFile was changed. clear the record.")
|
||||
return false
|
||||
}
|
||||
if dfc.ObjectInfo.LastModified != output.LastModified.Unix() || dfc.ObjectInfo.ETag != output.ETag || dfc.ObjectInfo.Size != output.ContentLength {
|
||||
doLog(LEVEL_INFO, "Checkpoint file is invalid, the object info was changed. clear the record.")
|
||||
return false
|
||||
}
|
||||
if dfc.TempFileInfo.Size != output.ContentLength {
|
||||
doLog(LEVEL_INFO, "Checkpoint file is invalid, size was changed. clear the record.")
|
||||
return false
|
||||
}
|
||||
stat, err := os.Stat(dfc.TempFileInfo.TempFileUrl)
|
||||
if err != nil || stat.Size() != dfc.ObjectInfo.Size {
|
||||
doLog(LEVEL_INFO, "Checkpoint file is invalid, the temp download file was changed. clear the record.")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// downloadPartTask bundles everything one routine-pool worker needs to fetch
// a single byte range of the object being downloaded into the shared temp file.
type downloadPartTask struct {
	GetObjectInput
	obsClient        *ObsClient
	extensions       []extensionOptions
	abort            *int32 // shared flag; set to 1 to make sibling tasks stop
	partNumber       int64
	tempFileURL      string // path of the temp file the range is written into
	enableCheckpoint bool
}
|
||||
|
||||
func (task *downloadPartTask) Run() interface{} {
|
||||
if atomic.LoadInt32(task.abort) == 1 {
|
||||
return errAbort
|
||||
}
|
||||
getObjectInput := &GetObjectInput{}
|
||||
getObjectInput.GetObjectMetadataInput = task.GetObjectMetadataInput
|
||||
getObjectInput.IfMatch = task.IfMatch
|
||||
getObjectInput.IfNoneMatch = task.IfNoneMatch
|
||||
getObjectInput.IfModifiedSince = task.IfModifiedSince
|
||||
getObjectInput.IfUnmodifiedSince = task.IfUnmodifiedSince
|
||||
getObjectInput.RangeStart = task.RangeStart
|
||||
getObjectInput.RangeEnd = task.RangeEnd
|
||||
|
||||
var output *GetObjectOutput
|
||||
var err error
|
||||
if len(task.extensions) != 0 {
|
||||
output, err = task.obsClient.GetObjectWithoutProgress(getObjectInput, task.extensions...)
|
||||
} else {
|
||||
output, err = task.obsClient.GetObjectWithoutProgress(getObjectInput)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
defer func() {
|
||||
errMsg := output.Body.Close()
|
||||
if errMsg != nil {
|
||||
doLog(LEVEL_WARN, "Failed to close response body.")
|
||||
}
|
||||
}()
|
||||
_err := updateDownloadFile(task.tempFileURL, task.RangeStart, output)
|
||||
if _err != nil {
|
||||
if !task.enableCheckpoint {
|
||||
atomic.CompareAndSwapInt32(task.abort, 0, 1)
|
||||
doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
|
||||
}
|
||||
return _err
|
||||
}
|
||||
return output
|
||||
} else if obsError, ok := err.(ObsError); ok && obsError.StatusCode >= 400 && obsError.StatusCode < 500 {
|
||||
atomic.CompareAndSwapInt32(task.abort, 0, 1)
|
||||
doLog(LEVEL_WARN, "Task is aborted, part number is [%d]", task.partNumber)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func getObjectInfo(input *DownloadFileInput, obsClient *ObsClient, extensions []extensionOptions) (getObjectmetaOutput *GetObjectMetadataOutput, err error) {
|
||||
if len(extensions) != 0 {
|
||||
getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput, extensions...)
|
||||
} else {
|
||||
getObjectmetaOutput, err = obsClient.GetObjectMetadata(&input.GetObjectMetadataInput)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getDownloadCheckpointFile(dfc *DownloadCheckpoint, input *DownloadFileInput, output *GetObjectMetadataOutput) (needCheckpoint bool, err error) {
|
||||
checkpointFilePath := input.CheckpointFile
|
||||
checkpointFileStat, err := os.Stat(checkpointFilePath)
|
||||
if err != nil {
|
||||
doLog(LEVEL_DEBUG, fmt.Sprintf("Stat checkpoint file failed with error: [%v].", err))
|
||||
return true, nil
|
||||
}
|
||||
if checkpointFileStat.IsDir() {
|
||||
doLog(LEVEL_ERROR, "Checkpoint file can not be a folder.")
|
||||
return false, errors.New("checkpoint file can not be a folder")
|
||||
}
|
||||
err = loadCheckpointFile(checkpointFilePath, dfc)
|
||||
if err != nil {
|
||||
doLog(LEVEL_WARN, fmt.Sprintf("Load checkpoint file failed with error: [%v].", err))
|
||||
return true, nil
|
||||
} else if !dfc.isValid(input, output) {
|
||||
if dfc.TempFileInfo.TempFileUrl != "" {
|
||||
_err := os.Remove(dfc.TempFileInfo.TempFileUrl)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
|
||||
}
|
||||
}
|
||||
_err := os.Remove(checkpointFilePath)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to remove checkpoint file with error [%v].", _err)
|
||||
}
|
||||
} else {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func sliceObject(objectSize, partSize int64, dfc *DownloadCheckpoint) {
|
||||
cnt := objectSize / partSize
|
||||
if objectSize%partSize > 0 {
|
||||
cnt++
|
||||
}
|
||||
|
||||
if cnt == 0 {
|
||||
downloadPart := DownloadPartInfo{}
|
||||
downloadPart.PartNumber = 1
|
||||
dfc.DownloadParts = []DownloadPartInfo{downloadPart}
|
||||
} else {
|
||||
downloadParts := make([]DownloadPartInfo, 0, cnt)
|
||||
var i int64
|
||||
for i = 0; i < cnt; i++ {
|
||||
downloadPart := DownloadPartInfo{}
|
||||
downloadPart.PartNumber = i + 1
|
||||
downloadPart.Offset = i * partSize
|
||||
downloadPart.RangeEnd = (i+1)*partSize - 1
|
||||
downloadParts = append(downloadParts, downloadPart)
|
||||
}
|
||||
dfc.DownloadParts = downloadParts
|
||||
if value := objectSize % partSize; value > 0 {
|
||||
dfc.DownloadParts[cnt-1].RangeEnd = dfc.ObjectInfo.Size - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// createFile pre-allocates a temp download file of fileSize bytes by opening
// (create/truncate) the path and ftruncating it to the target length.
// NOTE(review): this uses raw syscall.Open/Ftruncate, which is
// platform-dependent (Unix-style); prepareTempFile falls back to a portable
// os.OpenFile path when this fails.
func createFile(tempFileURL string, fileSize int64) error {
	fd, err := syscall.Open(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		doLog(LEVEL_WARN, "Failed to open temp download file [%s].", tempFileURL)
		return err
	}
	// Close the descriptor on every exit path; a close failure is only logged.
	defer func() {
		errMsg := syscall.Close(fd)
		if errMsg != nil {
			doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
		}
	}()
	// Extend (or shrink) the file to exactly fileSize bytes.
	err = syscall.Ftruncate(fd, fileSize)
	if err != nil {
		doLog(LEVEL_WARN, "Failed to create file with error [%v].", err)
	}
	return err
}
|
||||
|
||||
func prepareTempFile(tempFileURL string, fileSize int64) error {
|
||||
parentDir := filepath.Dir(tempFileURL)
|
||||
stat, err := os.Stat(parentDir)
|
||||
if err != nil {
|
||||
doLog(LEVEL_DEBUG, "Failed to stat path with error [%v].", err)
|
||||
_err := os.MkdirAll(parentDir, os.ModePerm)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to make dir with error [%v].", _err)
|
||||
return _err
|
||||
}
|
||||
} else if !stat.IsDir() {
|
||||
doLog(LEVEL_ERROR, "Cannot create folder [%s] due to a same file exists.", parentDir)
|
||||
return fmt.Errorf("cannot create folder [%s] due to a same file exists", parentDir)
|
||||
}
|
||||
|
||||
err = createFile(tempFileURL, fileSize)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
fd, err := os.OpenFile(tempFileURL, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to open temp download file [%s].", tempFileURL)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
errMsg := fd.Close()
|
||||
if errMsg != nil {
|
||||
doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
|
||||
}
|
||||
}()
|
||||
if fileSize > 0 {
|
||||
_, err = fd.WriteAt([]byte("a"), fileSize-1)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to create temp download file with error [%v].", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleDownloadFileResult(tempFileURL string, enableCheckpoint bool, downloadFileError error) error {
|
||||
if downloadFileError != nil {
|
||||
if !enableCheckpoint {
|
||||
_err := os.Remove(tempFileURL)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _err)
|
||||
}
|
||||
}
|
||||
return downloadFileError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// resumeDownload downloads an object to input.DownloadFile, resuming from a
// checkpoint file when input.EnableCheckpoint is set. It fetches the object
// metadata, slices the object into ranged parts, downloads the parts
// concurrently into a "<DownloadFile>.tmp" file, and finally renames the
// temp file onto the target path. On success the object metadata is
// returned.
func (obsClient ObsClient) resumeDownload(input *DownloadFileInput, extensions []extensionOptions) (output *GetObjectMetadataOutput, err error) {
	// Metadata (size/ETag/mtime) drives both part slicing and checkpoint
	// validation.
	getObjectmetaOutput, err := getObjectInfo(input, &obsClient, extensions)
	if err != nil {
		return nil, err
	}

	objectSize := getObjectmetaOutput.ContentLength
	partSize := input.PartSize
	dfc := &DownloadCheckpoint{}

	var needCheckpoint = true
	var checkpointFilePath = input.CheckpointFile
	var enableCheckpoint = input.EnableCheckpoint
	if enableCheckpoint {
		// Try to reuse an existing checkpoint; needCheckpoint reports whether
		// a fresh one has to be built instead.
		needCheckpoint, err = getDownloadCheckpointFile(dfc, input, getObjectmetaOutput)
		if err != nil {
			return nil, err
		}
	}

	if needCheckpoint {
		// Build a fresh checkpoint describing the remote object and the local
		// temp file, then slice the object and pre-allocate the temp file.
		dfc.Bucket = input.Bucket
		dfc.Key = input.Key
		dfc.VersionId = input.VersionId
		dfc.DownloadFile = input.DownloadFile
		dfc.ObjectInfo = ObjectInfo{}
		dfc.ObjectInfo.LastModified = getObjectmetaOutput.LastModified.Unix()
		dfc.ObjectInfo.Size = getObjectmetaOutput.ContentLength
		dfc.ObjectInfo.ETag = getObjectmetaOutput.ETag
		dfc.TempFileInfo = TempFileInfo{}
		dfc.TempFileInfo.TempFileUrl = input.DownloadFile + ".tmp"
		dfc.TempFileInfo.Size = getObjectmetaOutput.ContentLength

		sliceObject(objectSize, partSize, dfc)
		_err := prepareTempFile(dfc.TempFileInfo.TempFileUrl, dfc.TempFileInfo.Size)
		if _err != nil {
			return nil, _err
		}

		if enableCheckpoint {
			// Persist the fresh checkpoint; if that fails, remove the temp
			// file so a later attempt starts clean.
			_err := updateCheckpointFile(dfc, checkpointFilePath)
			if _err != nil {
				doLog(LEVEL_ERROR, "Failed to update checkpoint file with error [%v].", _err)
				_errMsg := os.Remove(dfc.TempFileInfo.TempFileUrl)
				if _errMsg != nil {
					doLog(LEVEL_WARN, "Failed to remove temp download file with error [%v].", _errMsg)
				}
				return nil, _err
			}
		}
	}

	// Download all incomplete parts concurrently into the temp file.
	downloadFileError := obsClient.downloadFileConcurrent(input, dfc, extensions)
	err = handleDownloadFileResult(dfc.TempFileInfo.TempFileUrl, enableCheckpoint, downloadFileError)
	if err != nil {
		return nil, err
	}

	// Atomically move the fully written temp file onto the target path.
	err = os.Rename(dfc.TempFileInfo.TempFileUrl, input.DownloadFile)
	if err != nil {
		doLog(LEVEL_ERROR, "Failed to rename temp download file [%s] to download file [%s] with error [%v].", dfc.TempFileInfo.TempFileUrl, input.DownloadFile, err)
		return nil, err
	}
	if enableCheckpoint {
		// The checkpoint is no longer needed; failure to remove it is benign.
		err = os.Remove(checkpointFilePath)
		if err != nil {
			doLog(LEVEL_WARN, "Download file successfully, but remove checkpoint file failed with error [%v].", err)
		}
	}

	return getObjectmetaOutput, nil
}
|
||||
|
||||
func updateDownloadFile(filePath string, rangeStart int64, output *GetObjectOutput) error {
|
||||
fd, err := os.OpenFile(filePath, os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to open file [%s].", filePath)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
errMsg := fd.Close()
|
||||
if errMsg != nil {
|
||||
doLog(LEVEL_WARN, "Failed to close file with error [%v].", errMsg)
|
||||
}
|
||||
}()
|
||||
_, err = fd.Seek(rangeStart, 0)
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to seek file with error [%v].", err)
|
||||
return err
|
||||
}
|
||||
fileWriter := bufio.NewWriterSize(fd, 65536)
|
||||
part := make([]byte, 8192)
|
||||
var readErr error
|
||||
var readCount int
|
||||
for {
|
||||
readCount, readErr = output.Body.Read(part)
|
||||
if readCount > 0 {
|
||||
wcnt, werr := fileWriter.Write(part[0:readCount])
|
||||
if werr != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to write to file with error [%v].", werr)
|
||||
return werr
|
||||
}
|
||||
if wcnt != readCount {
|
||||
doLog(LEVEL_ERROR, "Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt)
|
||||
return fmt.Errorf("Failed to write to file [%s], expect: [%d], actual: [%d]", filePath, readCount, wcnt)
|
||||
}
|
||||
}
|
||||
if readErr != nil {
|
||||
if readErr != io.EOF {
|
||||
doLog(LEVEL_ERROR, "Failed to read response body with error [%v].", readErr)
|
||||
return readErr
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
err = fileWriter.Flush()
|
||||
if err != nil {
|
||||
doLog(LEVEL_ERROR, "Failed to flush file with error [%v].", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleDownloadTaskResult(result interface{}, dfc *DownloadCheckpoint, partNum int64, enableCheckpoint bool, checkpointFile string, lock *sync.Mutex, completedBytes *int64, listener ProgressListener) (err error) {
|
||||
if output, ok := result.(*GetObjectOutput); ok {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
dfc.DownloadParts[partNum-1].IsCompleted = true
|
||||
|
||||
atomic.AddInt64(completedBytes, output.ContentLength)
|
||||
|
||||
event := newProgressEvent(TransferDataEvent, *completedBytes, dfc.ObjectInfo.Size)
|
||||
publishProgress(listener, event)
|
||||
|
||||
if enableCheckpoint {
|
||||
_err := updateCheckpointFile(dfc, checkpointFile)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to update checkpoint file with error [%v].", _err)
|
||||
}
|
||||
}
|
||||
} else if result != errAbort {
|
||||
if _err, ok := result.(error); ok {
|
||||
err = _err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// downloadFileConcurrent schedules one ranged GET task per incomplete part
// on a routine pool of input.TaskNum workers, publishing progress events as
// parts finish. The first task error is captured (guarded by errFlag) and
// returned after the pool drains; completed parts from a previous run are
// counted as progress without being refetched.
func (obsClient ObsClient) downloadFileConcurrent(input *DownloadFileInput, dfc *DownloadCheckpoint, extensions []extensionOptions) error {
	pool := NewRoutinePool(input.TaskNum, MAX_PART_NUM)
	var downloadPartError atomic.Value
	var errFlag int32 // set once (CAS) so only the first error is stored
	var abort int32   // shared with tasks; 1 stops scheduling and running tasks
	lock := new(sync.Mutex)

	var completedBytes int64
	listener := obsClient.getProgressListener(extensions)
	totalBytes := dfc.ObjectInfo.Size
	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
	publishProgress(listener, event)

	for _, downloadPart := range dfc.DownloadParts {
		if atomic.LoadInt32(&abort) == 1 {
			break
		}
		if downloadPart.IsCompleted {
			// Already downloaded in a previous run: count it as progress.
			atomic.AddInt64(&completedBytes, downloadPart.RangeEnd-downloadPart.Offset+1)
			event := newProgressEvent(TransferDataEvent, completedBytes, dfc.ObjectInfo.Size)
			publishProgress(listener, event)
			continue
		}
		// task is declared per iteration, so the closure below captures this
		// part's own copy.
		task := downloadPartTask{
			GetObjectInput: GetObjectInput{
				GetObjectMetadataInput: input.GetObjectMetadataInput,
				IfMatch:                input.IfMatch,
				IfNoneMatch:            input.IfNoneMatch,
				IfUnmodifiedSince:      input.IfUnmodifiedSince,
				IfModifiedSince:        input.IfModifiedSince,
				RangeStart:             downloadPart.Offset,
				RangeEnd:               downloadPart.RangeEnd,
			},
			obsClient:        &obsClient,
			extensions:       extensions,
			abort:            &abort,
			partNumber:       downloadPart.PartNumber,
			tempFileURL:      dfc.TempFileInfo.TempFileUrl,
			enableCheckpoint: input.EnableCheckpoint,
		}
		pool.ExecuteFunc(func() interface{} {
			result := task.Run()
			err := handleDownloadTaskResult(result, dfc, task.partNumber, input.EnableCheckpoint, input.CheckpointFile, lock, &completedBytes, listener)
			if err != nil && atomic.CompareAndSwapInt32(&errFlag, 0, 1) {
				downloadPartError.Store(err)
			}
			return nil
		})
	}
	// Wait for all scheduled tasks to finish before inspecting the error.
	pool.ShutDown()
	if err, ok := downloadPartError.Load().(error); ok {
		event := newProgressEvent(TransferFailedEvent, completedBytes, dfc.ObjectInfo.Size)
		publishProgress(listener, event)
		return err
	}
	event = newProgressEvent(TransferCompletedEvent, completedBytes, dfc.ObjectInfo.Size)
	publishProgress(listener, event)
	return nil
}
|
354
myhwoss/obs/type.go
Normal file
354
myhwoss/obs/type.go
Normal file
@@ -0,0 +1,354 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
// SignatureType defines the request signing algorithm used by the client.
type SignatureType string

const (
	// SignatureV2 signature type v2
	SignatureV2 SignatureType = "v2"
	// SignatureV4 signature type v4
	SignatureV4 SignatureType = "v4"
	// SignatureObs signature type OBS
	SignatureObs SignatureType = "OBS"
)

// HttpMethodType defines the HTTP method of a request.
type HttpMethodType string

const (
	// HttpMethodGet HTTP method GET
	HttpMethodGet HttpMethodType = HTTP_GET
	// HttpMethodPut HTTP method PUT
	HttpMethodPut HttpMethodType = HTTP_PUT
	// HttpMethodPost HTTP method POST
	HttpMethodPost HttpMethodType = HTTP_POST
	// HttpMethodDelete HTTP method DELETE
	HttpMethodDelete HttpMethodType = HTTP_DELETE
	// HttpMethodHead HTTP method HEAD
	HttpMethodHead HttpMethodType = HTTP_HEAD
	// HttpMethodOptions HTTP method OPTIONS
	HttpMethodOptions HttpMethodType = HTTP_OPTIONS
)
|
||||
|
||||
// SubResourceType defines the subResource value appended to a request URL
// (e.g. "?acl") to address a bucket or object sub-resource.
type SubResourceType string

const (
	// SubResourceStoragePolicy subResource value: storagePolicy
	SubResourceStoragePolicy SubResourceType = "storagePolicy"

	// SubResourceStorageClass subResource value: storageClass
	SubResourceStorageClass SubResourceType = "storageClass"

	// SubResourceQuota subResource value: quota
	SubResourceQuota SubResourceType = "quota"

	// SubResourceStorageInfo subResource value: storageinfo
	SubResourceStorageInfo SubResourceType = "storageinfo"

	// SubResourceLocation subResource value: location
	SubResourceLocation SubResourceType = "location"

	// SubResourceAcl subResource value: acl
	SubResourceAcl SubResourceType = "acl"

	// SubResourcePolicy subResource value: policy
	SubResourcePolicy SubResourceType = "policy"

	// SubResourceCors subResource value: cors
	SubResourceCors SubResourceType = "cors"

	// SubResourceVersioning subResource value: versioning
	SubResourceVersioning SubResourceType = "versioning"

	// SubResourceWebsite subResource value: website
	SubResourceWebsite SubResourceType = "website"

	// SubResourceLogging subResource value: logging
	SubResourceLogging SubResourceType = "logging"

	// SubResourceLifecycle subResource value: lifecycle
	SubResourceLifecycle SubResourceType = "lifecycle"

	// SubResourceNotification subResource value: notification
	SubResourceNotification SubResourceType = "notification"

	// SubResourceEncryption subResource value: encryption
	SubResourceEncryption SubResourceType = "encryption"

	// SubResourceTagging subResource value: tagging
	SubResourceTagging SubResourceType = "tagging"

	// SubResourceDelete subResource value: delete
	SubResourceDelete SubResourceType = "delete"

	// SubResourceVersions subResource value: versions
	SubResourceVersions SubResourceType = "versions"

	// SubResourceUploads subResource value: uploads
	SubResourceUploads SubResourceType = "uploads"

	// SubResourceRestore subResource value: restore
	SubResourceRestore SubResourceType = "restore"

	// SubResourceMetadata subResource value: metadata
	SubResourceMetadata SubResourceType = "metadata"

	// SubResourceRequestPayment subResource value: requestPayment
	SubResourceRequestPayment SubResourceType = "requestPayment"

	// SubResourceAppend subResource value: append
	SubResourceAppend SubResourceType = "append"

	// SubResourceModify subResource value: modify
	SubResourceModify SubResourceType = "modify"

	// SubResourceRename subResource value: rename
	SubResourceRename SubResourceType = "rename"

	// SubResourceCustomDomain subResource value: customdomain
	SubResourceCustomDomain SubResourceType = "customdomain"

	// SubResourceMirrorBackToSource subResource value: mirrorBackToSource
	SubResourceMirrorBackToSource SubResourceType = "mirrorBackToSource"
)

// objectKeyType defines special objectKey values used for service-level
// extension APIs.
type objectKeyType string

const (
	// objectKeyExtensionPolicy objectKey value: v1/extension_policy
	objectKeyExtensionPolicy objectKeyType = "v1/extension_policy"

	// objectKeyAsyncFetchJob objectKey value: v1/async-fetch/jobs
	objectKeyAsyncFetchJob objectKeyType = "v1/async-fetch/jobs"
)
|
||||
|
||||
// AclType defines a canned bucket/object ACL.
type AclType string

const (
	// AclPrivate canned ACL: private
	AclPrivate AclType = "private"
	// AclPublicRead canned ACL: public-read
	AclPublicRead AclType = "public-read"
	// AclPublicReadWrite canned ACL: public-read-write
	AclPublicReadWrite AclType = "public-read-write"
	// AclAuthenticatedRead canned ACL: authenticated-read
	AclAuthenticatedRead AclType = "authenticated-read"
	// AclBucketOwnerRead canned ACL: bucket-owner-read
	AclBucketOwnerRead AclType = "bucket-owner-read"
	// AclBucketOwnerFullControl canned ACL: bucket-owner-full-control
	AclBucketOwnerFullControl AclType = "bucket-owner-full-control"
	// AclLogDeliveryWrite canned ACL: log-delivery-write
	AclLogDeliveryWrite AclType = "log-delivery-write"
	// AclPublicReadDelivery canned ACL: public-read-delivered
	AclPublicReadDelivery AclType = "public-read-delivered"
	// AclPublicReadWriteDelivery canned ACL: public-read-write-delivered
	AclPublicReadWriteDelivery AclType = "public-read-write-delivered"
)

// StorageClassType defines a bucket/object storage class.
type StorageClassType string

const (
	// StorageClassStandard storage class: STANDARD
	StorageClassStandard StorageClassType = "STANDARD"

	// StorageClassWarm storage class: WARM
	StorageClassWarm StorageClassType = "WARM"

	// StorageClassCold storage class: COLD
	StorageClassCold StorageClassType = "COLD"

	// storageClassStandardIA storage class: STANDARD_IA (unexported alias value)
	storageClassStandardIA StorageClassType = "STANDARD_IA"
	// storageClassGlacier storage class: GLACIER (unexported alias value)
	storageClassGlacier StorageClassType = "GLACIER"
)

// PermissionType defines an ACL grant permission.
type PermissionType string

const (
	// PermissionRead permission type: READ
	PermissionRead PermissionType = "READ"

	// PermissionWrite permission type: WRITE
	PermissionWrite PermissionType = "WRITE"

	// PermissionReadAcp permission type: READ_ACP
	PermissionReadAcp PermissionType = "READ_ACP"

	// PermissionWriteAcp permission type: WRITE_ACP
	PermissionWriteAcp PermissionType = "WRITE_ACP"

	// PermissionFullControl permission type: FULL_CONTROL
	PermissionFullControl PermissionType = "FULL_CONTROL"
)
|
||||
|
||||
// GranteeType defines the kind of grantee in an ACL grant.
type GranteeType string

const (
	// GranteeGroup grantee type: Group
	GranteeGroup GranteeType = "Group"

	// GranteeUser grantee type: CanonicalUser
	GranteeUser GranteeType = "CanonicalUser"
)

// GroupUriType defines the predefined group a Group grantee refers to.
type GroupUriType string

const (
	// GroupAllUsers grantee uri type: AllUsers
	GroupAllUsers GroupUriType = "AllUsers"

	// GroupAuthenticatedUsers grantee uri type: AuthenticatedUsers
	GroupAuthenticatedUsers GroupUriType = "AuthenticatedUsers"

	// GroupLogDelivery grantee uri type: LogDelivery
	GroupLogDelivery GroupUriType = "LogDelivery"
)

// VersioningStatusType defines a bucket's versioning status.
type VersioningStatusType string

const (
	// VersioningStatusEnabled version status: Enabled
	VersioningStatusEnabled VersioningStatusType = "Enabled"

	// VersioningStatusSuspended version status: Suspended
	VersioningStatusSuspended VersioningStatusType = "Suspended"
)

// ProtocolType defines the transfer protocol of a request or redirect.
type ProtocolType string

const (
	// ProtocolHttp protocol type: http
	ProtocolHttp ProtocolType = "http"

	// ProtocolHttps protocol type: https
	ProtocolHttps ProtocolType = "https"
)
|
||||
|
||||
// RuleStatusType defines a lifecycle rule's status.
type RuleStatusType string

const (
	// RuleStatusEnabled rule status: Enabled
	RuleStatusEnabled RuleStatusType = "Enabled"

	// RuleStatusDisabled rule status: Disabled
	RuleStatusDisabled RuleStatusType = "Disabled"
)

// RestoreTierType defines the speed tier of a restore operation.
type RestoreTierType string

const (
	// RestoreTierExpedited restore option: Expedited
	RestoreTierExpedited RestoreTierType = "Expedited"

	// RestoreTierStandard restore option: Standard
	RestoreTierStandard RestoreTierType = "Standard"

	// RestoreTierBulk restore option: Bulk
	RestoreTierBulk RestoreTierType = "Bulk"
)

// MetadataDirectiveType defines how metadata is handled during a copy.
type MetadataDirectiveType string

const (
	// CopyMetadata metadata operation: COPY
	CopyMetadata MetadataDirectiveType = "COPY"

	// ReplaceNew metadata operation: REPLACE_NEW
	ReplaceNew MetadataDirectiveType = "REPLACE_NEW"

	// ReplaceMetadata metadata operation: REPLACE
	ReplaceMetadata MetadataDirectiveType = "REPLACE"
)
|
||||
|
||||
// EventType defines the bucket notification event kinds.
type EventType string

const (
	// ObjectCreatedAll type of events: ObjectCreated:*
	ObjectCreatedAll EventType = "ObjectCreated:*"

	// ObjectCreatedPut type of events: ObjectCreated:Put
	ObjectCreatedPut EventType = "ObjectCreated:Put"

	// ObjectCreatedPost type of events: ObjectCreated:Post
	ObjectCreatedPost EventType = "ObjectCreated:Post"

	// ObjectCreatedCopy type of events: ObjectCreated:Copy
	ObjectCreatedCopy EventType = "ObjectCreated:Copy"

	// ObjectCreatedCompleteMultipartUpload type of events: ObjectCreated:CompleteMultipartUpload
	ObjectCreatedCompleteMultipartUpload EventType = "ObjectCreated:CompleteMultipartUpload"

	// ObjectRemovedAll type of events: ObjectRemoved:*
	ObjectRemovedAll EventType = "ObjectRemoved:*"

	// ObjectRemovedDelete type of events: ObjectRemoved:Delete
	ObjectRemovedDelete EventType = "ObjectRemoved:Delete"

	// ObjectRemovedDeleteMarkerCreated type of events: ObjectRemoved:DeleteMarkerCreated
	ObjectRemovedDeleteMarkerCreated EventType = "ObjectRemoved:DeleteMarkerCreated"
)

// PayerType defines who pays for requests and data transfer.
type PayerType string

const (
	// BucketOwnerPayer type of payer: BucketOwner
	BucketOwnerPayer PayerType = "BucketOwner"

	// RequesterPayer type of payer: Requester
	RequesterPayer PayerType = "Requester"

	// Requester header value for requester-pays requests
	Requester PayerType = "requester"
)
|
||||
|
||||
// FetchPolicyStatusType defines the status of an async-fetch policy.
type FetchPolicyStatusType string

const (
	// FetchStatusOpen fetch policy status: open
	FetchStatusOpen FetchPolicyStatusType = "open"

	// FetchStatusClosed fetch policy status: closed
	FetchStatusClosed FetchPolicyStatusType = "closed"
)

// AvailableZoneType defines the availability-zone redundancy of a bucket.
type AvailableZoneType string

const (
	// AvailableZoneMultiAz multi-AZ redundancy: 3az
	AvailableZoneMultiAz AvailableZoneType = "3az"
)

// FSStatusType defines whether the file-system feature is enabled on a bucket.
type FSStatusType string

const (
	// FSStatusEnabled file system feature: Enabled
	FSStatusEnabled FSStatusType = "Enabled"
	// FSStatusDisabled file system feature: Disabled
	FSStatusDisabled FSStatusType = "Disabled"
)

// BucketType defines the kind of bucket.
type BucketType string

const (
	// OBJECT object bucket
	OBJECT BucketType = "OBJECT"
	// POSIX POSIX (file-system) bucket
	POSIX BucketType = "POSIX"
)

// BucketRedundancyType defines the data redundancy type of a bucket.
type BucketRedundancyType string

const (
	// BucketRedundancyClassic redundancy type: CLASSIC
	BucketRedundancyClassic BucketRedundancyType = "CLASSIC"
	// BucketRedundancyFusion redundancy type: FUSION
	BucketRedundancyFusion BucketRedundancyType = "FUSION"
)
|
620
myhwoss/obs/util.go
Normal file
620
myhwoss/obs/util.go
Normal file
@@ -0,0 +1,620 @@
|
||||
// Copyright 2019 Huawei Technologies Co.,Ltd.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
// this file except in compliance with the License. You may obtain a copy of the
|
||||
// License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed
|
||||
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations under the License.
|
||||
|
||||
package obs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// regex matches a string consisting of exactly one CJK unified ideograph
// (U+4E00..U+9FA5).
var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")

// ipRegex matches a dotted-quad IPv4 address with each octet in 0-255.
var ipRegex = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$")

// v4AuthRegex captures the Credential and SignedHeaders fields of a V4-style
// Authorization header value.
var v4AuthRegex = regexp.MustCompile("Credential=(.+?),SignedHeaders=(.+?),Signature=.+")

// regionRegex captures the third slash-separated segment of a credential
// scope — presumably the region (access/date/region/...); verify against
// callers before relying on this.
var regionRegex = regexp.MustCompile(".+/\\d+/(.+?)/.+")
|
||||
|
||||
// StringContains replaces every occurrence of subStr in src with
// subTranscoding and returns the resulting string; src itself is unchanged.
func StringContains(src string, subStr string, subTranscoding string) string {
	// strings.ReplaceAll is the idiomatic equivalent of
	// strings.Replace(src, subStr, subTranscoding, -1).
	return strings.ReplaceAll(src, subStr, subTranscoding)
}
|
||||
|
||||
// xmlEscaper rewrites the five XML special characters to their entity form.
// A single Replacer pass over the input replaces the original five
// sequential full-string passes; since replacements are applied to the
// source text only, '&' inside inserted entities is never double-escaped,
// exactly as before.
var xmlEscaper = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	"'", "&apos;",
	"\"", "&quot;",
)

// XmlTranscoding replaces XML special characters in src with their escaped
// (entity) form and returns the escaped string.
func XmlTranscoding(src string) string {
	return xmlEscaper.Replace(src)
}
|
||||
|
||||
// HandleHttpResponse parses resp into output, choosing the callback parser
// when the request for this action carried a callback header and the action
// supports callbacks, and the regular (XML or header) parser otherwise.
// Parse failures are logged at WARN level and returned.
func HandleHttpResponse(action string, headers map[string][]string, output IBaseModel, resp *http.Response, xmlResult bool, isObs bool) (err error) {
	if IsHandleCallbackResponse(action, headers, isObs) {
		if err = ParseCallbackResponseToBaseModel(resp, output, isObs); err != nil {
			doLog(LEVEL_WARN, "Parse callback response to BaseModel with error: %v", err)
		}
	} else {
		if err = ParseResponseToBaseModel(resp, output, xmlResult, isObs); err != nil {
			doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", err)
		}
	}
	return
}
|
||||
|
||||
func IsHandleCallbackResponse(action string, headers map[string][]string, isObs bool) bool {
|
||||
var headerPrefix = HEADER_PREFIX
|
||||
if isObs == true {
|
||||
headerPrefix = HEADER_PREFIX_OBS
|
||||
}
|
||||
supportCallbackActions := []string{PUT_OBJECT, PUT_FILE, "CompleteMultipartUpload"}
|
||||
return len(headers[headerPrefix+CALLBACK]) != 0 && IsContain(supportCallbackActions, action)
|
||||
}
|
||||
|
||||
// IsContain reports whether item is present in items. A nil or empty slice
// contains nothing.
func IsContain(items []string, item string) bool {
	for i := range items {
		if items[i] == item {
			return true
		}
	}
	return false
}
|
||||
|
||||
// StringToInt converts value to an int, returning def when value is not a
// valid decimal integer.
func StringToInt(value string, def int) int {
	if parsed, err := strconv.Atoi(value); err == nil {
		return parsed
	}
	return def
}
|
||||
|
||||
// StringToInt64 converts value to an int64, returning def when value is not
// a valid decimal integer.
func StringToInt64(value string, def int64) int64 {
	if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
		return parsed
	}
	return def
}
|
||||
|
||||
// IntToString converts an int value to its decimal string representation.
func IntToString(value int) string {
	return strconv.FormatInt(int64(value), 10)
}
|
||||
|
||||
// Int64ToString converts an int64 value to its decimal string representation.
func Int64ToString(value int64) string {
	// 20 bytes is enough for any int64 including the sign.
	return string(strconv.AppendInt(make([]byte, 0, 20), value, 10))
}
|
||||
|
||||
// GetCurrentTimestamp returns the current Unix time in milliseconds.
func GetCurrentTimestamp() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
|
||||
|
||||
// FormatUtcNow formats the current UTC time using the given reference layout.
func FormatUtcNow(format string) string {
	now := time.Now().UTC()
	return now.Format(format)
}
|
||||
|
||||
// FormatUtcToRfc1123 gets a textual representation of the RFC1123 format time value
|
||||
func FormatUtcToRfc1123(t time.Time) string {
|
||||
ret := t.UTC().Format(time.RFC1123)
|
||||
return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
|
||||
}
|
||||
|
||||
// Md5 gets the md5 value of input
|
||||
func Md5(value []byte) []byte {
|
||||
m := md5.New()
|
||||
_, err := m.Write(value)
|
||||
if err != nil {
|
||||
doLog(LEVEL_WARN, "MD5 failed to write")
|
||||
}
|
||||
return m.Sum(nil)
|
||||
}
|
||||
|
||||
// HmacSha1 gets hmac sha1 value of input
|
||||
func HmacSha1(key, value []byte) []byte {
|
||||
mac := hmac.New(sha1.New, key)
|
||||
_, err := mac.Write(value)
|
||||
if err != nil {
|
||||
doLog(LEVEL_WARN, "HmacSha1 failed to write")
|
||||
}
|
||||
return mac.Sum(nil)
|
||||
}
|
||||
|
||||
// HmacSha256 get hmac sha256 value if input
|
||||
func HmacSha256(key, value []byte) []byte {
|
||||
mac := hmac.New(sha256.New, key)
|
||||
_, err := mac.Write(value)
|
||||
if err != nil {
|
||||
doLog(LEVEL_WARN, "HmacSha256 failed to write")
|
||||
}
|
||||
return mac.Sum(nil)
|
||||
}
|
||||
|
||||
// Base64Encode returns the standard base64 encoding of value as a string.
func Base64Encode(value []byte) string {
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(value)))
	base64.StdEncoding.Encode(encoded, value)
	return string(encoded)
}
|
||||
|
||||
// Base64Decode decodes a standard base64-encoded string into bytes.
func Base64Decode(value string) ([]byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(value)
	return decoded, err
}
|
||||
|
||||
// HexMd5 returns the md5 value of input in hexadecimal format
|
||||
func HexMd5(value []byte) string {
|
||||
return Hex(Md5(value))
|
||||
}
|
||||
|
||||
// Base64Md5 returns the md5 value of input with Base64Encode
|
||||
func Base64Md5(value []byte) string {
|
||||
return Base64Encode(Md5(value))
|
||||
}
|
||||
|
||||
// Sha256Hash returns sha256 checksum
|
||||
func Sha256Hash(value []byte) []byte {
|
||||
hash := sha256.New()
|
||||
_, err := hash.Write(value)
|
||||
if err != nil {
|
||||
doLog(LEVEL_WARN, "Sha256Hash failed to write")
|
||||
}
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
// ParseXml unmarshals XML bytes into result; empty input is a no-op that
// returns nil.
func ParseXml(value []byte, result interface{}) error {
	var err error
	if len(value) > 0 {
		err = xml.Unmarshal(value, result)
	}
	return err
}
|
||||
|
||||
// parseJSON unmarshals JSON bytes into result; empty input is a no-op that
// returns nil.
func parseJSON(value []byte, result interface{}) error {
	var err error
	if len(value) > 0 {
		err = json.Unmarshal(value, result)
	}
	return err
}
|
||||
|
||||
// TransToXml marshals value to XML; a nil value yields an empty byte slice
// with no error.
func TransToXml(value interface{}) ([]byte, error) {
	if value != nil {
		return xml.Marshal(value)
	}
	return []byte{}, nil
}
|
||||
|
||||
// Hex returns the lowercase hexadecimal encoding of value.
func Hex(value []byte) string {
	dst := make([]byte, hex.EncodedLen(len(value)))
	hex.Encode(dst, value)
	return string(dst)
}
|
||||
|
||||
// HexSha256 returns the Sha256Hash value of input in hexadecimal format
|
||||
func HexSha256(value []byte) string {
|
||||
return Hex(Sha256Hash(value))
|
||||
}
|
||||
|
||||
// UrlDecode query-unescapes value; on failure it returns "" and the error.
func UrlDecode(value string) (string, error) {
	decoded, err := url.QueryUnescape(value)
	if err != nil {
		return "", err
	}
	return decoded, nil
}
|
||||
|
||||
// UrlDecodeWithoutError wrapper of UrlDecode
|
||||
func UrlDecodeWithoutError(value string) string {
|
||||
ret, err := UrlDecode(value)
|
||||
if err == nil {
|
||||
return ret
|
||||
}
|
||||
if isErrorLogEnabled() {
|
||||
doLog(LEVEL_ERROR, "Url decode error")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsIP checks whether the value matches ip address
|
||||
func IsIP(value string) bool {
|
||||
return ipRegex.MatchString(value)
|
||||
}
|
||||
|
||||
// UrlEncode encodes the input value
|
||||
func UrlEncode(value string, chineseOnly bool) string {
|
||||
if chineseOnly {
|
||||
values := make([]string, 0, len(value))
|
||||
for _, val := range value {
|
||||
_value := string(val)
|
||||
if regex.MatchString(_value) {
|
||||
_value = url.QueryEscape(_value)
|
||||
}
|
||||
values = append(values, _value)
|
||||
}
|
||||
return strings.Join(values, "")
|
||||
}
|
||||
return url.QueryEscape(value)
|
||||
}
|
||||
|
||||
// copyHeaders returns a deep copy of m with every key lower-cased, so the
// caller can mutate the result without affecting the input. A nil input
// yields an empty, non-nil map (range over nil is a no-op).
func copyHeaders(m map[string][]string) map[string][]string {
	ret := make(map[string][]string, len(m))
	for key, values := range m {
		// Duplicate the value slice with the built-in copy instead of an
		// element-by-element append loop.
		dup := make([]string, len(values))
		copy(dup, values)
		ret[strings.ToLower(key)] = dup
	}
	return ret
}
|
||||
|
||||
// parseHeaders inspects the request headers and determines the signature
// protocol in use. It returns:
//   - signature: "v4" when the Authorization header starts with the v4 hash
//     prefix, otherwise "v2" (also the default when no Authorization header
//     is present);
//   - region: for v4 only, the region extracted from the credential scope of
//     the Authorization header (empty otherwise);
//   - signedHeaders: for v4 only, the semicolon-separated list of signed
//     header names (empty otherwise).
//
// NOTE(review): header keys are looked up lower-cased, so callers are
// expected to pass headers already normalized by copyHeaders — confirm at
// call sites.
func parseHeaders(headers map[string][]string) (signature string, region string, signedHeaders string) {
	signature = "v2"
	if receviedAuthorization, ok := headers[strings.ToLower(HEADER_AUTH_CAMEL)]; ok && len(receviedAuthorization) > 0 {
		if strings.HasPrefix(receviedAuthorization[0], V4_HASH_PREFIX) {
			signature = "v4"
			// v4AuthRegex captures the credential scope (matches[1]) and the
			// signed-headers list (matches[2]) from the Authorization value.
			matches := v4AuthRegex.FindStringSubmatch(receviedAuthorization[0])
			if len(matches) >= 3 {
				region = matches[1]
				// Narrow the credential scope down to the bare region name.
				regions := regionRegex.FindStringSubmatch(region)
				if len(regions) >= 2 {
					region = regions[1]
				}
				signedHeaders = matches[2]
			}

		} else if strings.HasPrefix(receviedAuthorization[0], V2_HASH_PREFIX) {
			signature = "v2"
		}
	}
	return
}
|
||||
|
||||
// getTemporaryKeys lists the query-parameter names (in both casings) that
// mark a request as using temporary (pre-signed) authentication.
func getTemporaryKeys() []string {
	keys := []string{"Signature", "signature", "X-Amz-Signature", "x-amz-signature"}
	return keys
}
|
||||
|
||||
// getIsObs decides whether the request uses the OBS protocol (true) or the
// AWS/S3-compatible protocol (false). For temporary (pre-signed) requests it
// scans the query parameters; otherwise it scans the header names. Any
// AWS-prefixed parameter/header flips the result to false.
func getIsObs(isTemporary bool, querys []string, headers map[string][]string) bool {
	isObs := true
	if isTemporary {
		for _, value := range querys {
			keyPrefix := strings.ToLower(value)
			if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
				isObs = false
			} else if strings.HasPrefix(value, HEADER_ACCESSS_KEY_AMZ) {
				// NOTE(review): this branch matches the original-case value
				// while the branch above matches the lower-cased form —
				// presumably because HEADER_ACCESSS_KEY_AMZ is camel-cased;
				// confirm against the constant's definition. Also, unlike the
				// header loop below, this loop does not break early.
				isObs = false
			}
		}
	} else {
		for key := range headers {
			keyPrefix := strings.ToLower(key)
			if strings.HasPrefix(keyPrefix, HEADER_PREFIX) {
				isObs = false
				break
			}
		}
	}
	return isObs
}
|
||||
|
||||
func isPathStyle(headers map[string][]string, bucketName string) bool {
|
||||
if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetV2Authorization v2 Authorization
|
||||
func GetV2Authorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {
|
||||
|
||||
if strings.HasPrefix(queryURL, "?") {
|
||||
queryURL = queryURL[1:]
|
||||
}
|
||||
|
||||
method = strings.ToUpper(method)
|
||||
|
||||
querys := strings.Split(queryURL, "&")
|
||||
querysResult := make([]string, 0)
|
||||
for _, value := range querys {
|
||||
if value != "=" && len(value) != 0 {
|
||||
querysResult = append(querysResult, value)
|
||||
}
|
||||
}
|
||||
params := make(map[string]string)
|
||||
|
||||
for _, value := range querysResult {
|
||||
kv := strings.Split(value, "=")
|
||||
length := len(kv)
|
||||
if length == 1 {
|
||||
key := UrlDecodeWithoutError(kv[0])
|
||||
params[key] = ""
|
||||
} else if length >= 2 {
|
||||
key := UrlDecodeWithoutError(kv[0])
|
||||
vals := make([]string, 0, length-1)
|
||||
for i := 1; i < length; i++ {
|
||||
val := UrlDecodeWithoutError(kv[i])
|
||||
vals = append(vals, val)
|
||||
}
|
||||
params[key] = strings.Join(vals, "=")
|
||||
}
|
||||
}
|
||||
headers = copyHeaders(headers)
|
||||
pathStyle := isPathStyle(headers, bucketName)
|
||||
conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")},
|
||||
urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
|
||||
pathStyle: pathStyle}
|
||||
conf.signature = SignatureObs
|
||||
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
|
||||
ret = v2Auth(ak, sk, method, canonicalizedURL, headers, true)
|
||||
v2HashPrefix := OBS_HASH_PREFIX
|
||||
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
|
||||
return
|
||||
}
|
||||
|
||||
// getQuerysResult filters out empty items and the degenerate "=" item from
// the raw query fragments, preserving order.
func getQuerysResult(querys []string) []string {
	result := make([]string, 0, len(querys))
	for _, q := range querys {
		if len(q) == 0 || q == "=" {
			continue
		}
		result = append(result, q)
	}
	return result
}
|
||||
|
||||
func getParams(querysResult []string) map[string]string {
|
||||
params := make(map[string]string)
|
||||
for _, value := range querysResult {
|
||||
kv := strings.Split(value, "=")
|
||||
length := len(kv)
|
||||
if length == 1 {
|
||||
key := UrlDecodeWithoutError(kv[0])
|
||||
params[key] = ""
|
||||
} else if length >= 2 {
|
||||
key := UrlDecodeWithoutError(kv[0])
|
||||
vals := make([]string, 0, length-1)
|
||||
for i := 1; i < length; i++ {
|
||||
val := UrlDecodeWithoutError(kv[i])
|
||||
vals = append(vals, val)
|
||||
}
|
||||
params[key] = strings.Join(vals, "=")
|
||||
}
|
||||
}
|
||||
return params
|
||||
}
|
||||
|
||||
func getTemporaryAndSignature(params map[string]string) (bool, string) {
|
||||
isTemporary := false
|
||||
signature := "v2"
|
||||
temporaryKeys := getTemporaryKeys()
|
||||
for _, key := range temporaryKeys {
|
||||
if _, ok := params[key]; ok {
|
||||
isTemporary = true
|
||||
if strings.ToLower(key) == "signature" {
|
||||
signature = "v2"
|
||||
} else if strings.ToLower(key) == "x-amz-signature" {
|
||||
signature = "v4"
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return isTemporary, signature
|
||||
}
|
||||
|
||||
// GetAuthorization Authorization
|
||||
func GetAuthorization(ak, sk, method, bucketName, objectKey, queryURL string, headers map[string][]string) (ret map[string]string) {
|
||||
|
||||
if strings.HasPrefix(queryURL, "?") {
|
||||
queryURL = queryURL[1:]
|
||||
}
|
||||
|
||||
method = strings.ToUpper(method)
|
||||
|
||||
querys := strings.Split(queryURL, "&")
|
||||
querysResult := getQuerysResult(querys)
|
||||
params := getParams(querysResult)
|
||||
|
||||
isTemporary, signature := getTemporaryAndSignature(params)
|
||||
|
||||
isObs := getIsObs(isTemporary, querysResult, headers)
|
||||
headers = copyHeaders(headers)
|
||||
pathStyle := false
|
||||
if receviedHost, ok := headers[HEADER_HOST]; ok && len(receviedHost) > 0 && !strings.HasPrefix(receviedHost[0], bucketName+".") {
|
||||
pathStyle = true
|
||||
}
|
||||
conf := &config{securityProviders: []securityProvider{NewBasicSecurityProvider(ak, sk, "")},
|
||||
urlHolder: &urlHolder{scheme: "https", host: "dummy", port: 443},
|
||||
pathStyle: pathStyle}
|
||||
|
||||
if isTemporary {
|
||||
return getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature, conf, params, headers, isObs)
|
||||
}
|
||||
signature, region, signedHeaders := parseHeaders(headers)
|
||||
if signature == "v4" {
|
||||
conf.signature = SignatureV4
|
||||
requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
|
||||
parsedRequestURL, _err := url.Parse(requestURL)
|
||||
if _err != nil {
|
||||
doLog(LEVEL_WARN, "Failed to parse requestURL")
|
||||
return nil
|
||||
}
|
||||
headerKeys := strings.Split(signedHeaders, ";")
|
||||
_headers := make(map[string][]string, len(headerKeys))
|
||||
for _, headerKey := range headerKeys {
|
||||
_headers[headerKey] = headers[headerKey]
|
||||
}
|
||||
ret = v4Auth(ak, sk, region, method, canonicalizedURL, parsedRequestURL.RawQuery, _headers)
|
||||
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, ret["Credential"], ret["SignedHeaders"], ret["Signature"])
|
||||
} else if signature == "v2" {
|
||||
if isObs {
|
||||
conf.signature = SignatureObs
|
||||
} else {
|
||||
conf.signature = SignatureV2
|
||||
}
|
||||
_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
|
||||
ret = v2Auth(ak, sk, method, canonicalizedURL, headers, isObs)
|
||||
v2HashPrefix := V2_HASH_PREFIX
|
||||
if isObs {
|
||||
v2HashPrefix = OBS_HASH_PREFIX
|
||||
}
|
||||
ret[HEADER_AUTH_CAMEL] = fmt.Sprintf("%s %s:%s", v2HashPrefix, ak, ret["Signature"])
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// getTemporaryAuthorization computes the query-string signature fields for a
// temporary (pre-signed) request. For signature "v4" it rebuilds the v4
// string-to-sign from the request's amz parameters and returns the Algorithm/
// Credential/Date/Expires/SignedHeaders/Signature fields; for "v2" it returns
// Signature/AWSAccessKeyId/Expires. Returns nil when the v4 request URL
// cannot be parsed; returns an empty map for any other signature value.
func getTemporaryAuthorization(ak, sk, method, bucketName, objectKey, signature string, conf *config, params map[string]string,
	headers map[string][]string, isObs bool) (ret map[string]string) {

	if signature == "v4" {
		conf.signature = SignatureV4

		// Date parameter may appear under either the camel-cased or the
		// header-style key.
		longDate, ok := params[PARAM_DATE_AMZ_CAMEL]
		if !ok {
			longDate = params[HEADER_DATE_AMZ]
		}
		// NOTE(review): panics if neither date parameter is present (slice of
		// an empty string) — presumably callers guarantee one exists; confirm.
		shortDate := longDate[:8]

		credential, ok := params[PARAM_CREDENTIAL_AMZ_CAMEL]
		if !ok {
			credential = params[strings.ToLower(PARAM_CREDENTIAL_AMZ_CAMEL)]
		}

		_credential := UrlDecodeWithoutError(credential)

		// Extract the region embedded in the credential scope.
		regions := regionRegex.FindStringSubmatch(_credential)
		var region string
		if len(regions) >= 2 {
			region = regions[1]
		}

		_, scope := getCredential(ak, region, shortDate)

		expires, ok := params[PARAM_EXPIRES_AMZ_CAMEL]
		if !ok {
			expires = params[strings.ToLower(PARAM_EXPIRES_AMZ_CAMEL)]
		}

		signedHeaders, ok := params[PARAM_SIGNEDHEADERS_AMZ_CAMEL]
		if !ok {
			signedHeaders = params[strings.ToLower(PARAM_SIGNEDHEADERS_AMZ_CAMEL)]
		}

		algorithm, ok := params[PARAM_ALGORITHM_AMZ_CAMEL]
		if !ok {
			algorithm = params[strings.ToLower(PARAM_ALGORITHM_AMZ_CAMEL)]
		}

		// The incoming signature must not take part in the canonical URL used
		// for re-signing, so drop it from params (either casing).
		if _, ok := params[PARAM_SIGNATURE_AMZ_CAMEL]; ok {
			delete(params, PARAM_SIGNATURE_AMZ_CAMEL)
		} else if _, ok := params[strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL)]; ok {
			delete(params, strings.ToLower(PARAM_SIGNATURE_AMZ_CAMEL))
		}

		ret = make(map[string]string, 6)
		ret[PARAM_ALGORITHM_AMZ_CAMEL] = algorithm
		ret[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
		ret[PARAM_DATE_AMZ_CAMEL] = longDate
		ret[PARAM_EXPIRES_AMZ_CAMEL] = expires
		ret[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = signedHeaders

		requestURL, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		parsedRequestURL, _err := url.Parse(requestURL)
		if _err != nil {
			doLog(LEVEL_WARN, "Failed to parse requestUrl")
			return nil
		}
		stringToSign := getV4StringToSign(method, canonicalizedURL, parsedRequestURL.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, strings.Split(signedHeaders, ";"), headers)
		ret[PARAM_SIGNATURE_AMZ_CAMEL] = UrlEncode(getSignature(stringToSign, sk, region, shortDate), false)
	} else if signature == "v2" {
		if isObs {
			conf.signature = SignatureObs
		} else {
			conf.signature = SignatureV2
		}
		_, canonicalizedURL := conf.formatUrls(bucketName, objectKey, params, false)
		expires, ok := params["Expires"]
		if !ok {
			expires = params["expires"]
		}
		// v2 pre-signed requests sign the Expires value in place of the Date
		// header.
		headers[HEADER_DATE_CAMEL] = []string{expires}
		stringToSign := getV2StringToSign(method, canonicalizedURL, headers, isObs)
		ret = make(map[string]string, 3)
		ret["Signature"] = UrlEncode(Base64Encode(HmacSha1([]byte(sk), []byte(stringToSign))), false)
		ret["AWSAccessKeyId"] = UrlEncode(ak, false)
		ret["Expires"] = UrlEncode(expires, false)
	}

	return
}
|
||||
|
||||
func GetContentType(key string) (string, bool) {
|
||||
if ct, ok := mimeTypes[strings.ToLower(key[strings.LastIndex(key, ".")+1:])]; ok {
|
||||
return ct, ok
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func GetReaderLen(reader io.Reader) (int64, error) {
|
||||
var contentLength int64
|
||||
var err error
|
||||
switch v := reader.(type) {
|
||||
case *bytes.Buffer:
|
||||
contentLength = int64(v.Len())
|
||||
case *bytes.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *strings.Reader:
|
||||
contentLength = int64(v.Len())
|
||||
case *os.File:
|
||||
fInfo, fError := v.Stat()
|
||||
if fError != nil {
|
||||
err = fmt.Errorf("can't get reader content length,%s", fError.Error())
|
||||
} else {
|
||||
contentLength = fInfo.Size()
|
||||
}
|
||||
case *io.LimitedReader:
|
||||
contentLength = int64(v.N)
|
||||
case *fileReaderWrapper:
|
||||
contentLength = int64(v.totalCount)
|
||||
default:
|
||||
err = fmt.Errorf("can't get reader content length,unkown reader type")
|
||||
}
|
||||
return contentLength, err
|
||||
}
|
Reference in New Issue
Block a user