Merge branch 'master' into bufix/validate-tags-on-copy
@@ -5,6 +5,7 @@ import (
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -22,12 +23,11 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
		if message.NewParentPath != "" {
			dir = message.NewParentPath
		}
		if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile {
			if err := s3a.iam.LoadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil {
				return err
			}
			glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile)
		}
		fileName := message.NewEntry.Name
		content := message.NewEntry.Content

		_ = s3a.onIamConfigUpdate(dir, fileName, content)
		_ = s3a.onCircuitBreakerConfigUpdate(dir, fileName, content)

		return nil
	}
@@ -38,5 +38,26 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
		glog.V(0).Infof("iam follow metadata changes: %v", err)
		return true
	})

}

//reload iam config
func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte) error {
	if dir == filer.IamConfigDirecotry && filename == filer.IamIdentityFile {
		if err := s3a.iam.LoadS3ApiConfigurationFromBytes(content); err != nil {
			return err
		}
		glog.V(0).Infof("updated %s/%s", dir, filename)
	}
	return nil
}

//reload circuit breaker config
func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, content []byte) error {
	if dir == s3_constants.CircuitBreakerConfigDir && filename == s3_constants.CircuitBreakerConfigFile {
		if err := s3a.cb.LoadS3ApiConfigurationFromBytes(content); err != nil {
			return err
		}
		glog.V(0).Infof("updated %s/%s", dir, filename)
	}
	return nil
}
weed/s3api/s3_constants/s3_config.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package s3_constants

import (
	"strings"
)

var (
	CircuitBreakerConfigDir  = "/etc/s3"
	CircuitBreakerConfigFile = "circuit_breaker.json"
	AllowedActions           = []string{ACTION_READ, ACTION_WRITE, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN}
	LimitTypeCount           = "Count"
	LimitTypeBytes           = "MB"
	Separator                = ":"
)

func Concat(elements ...string) string {
	return strings.Join(elements, Separator)
}
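For context, a minimal standalone sketch (not part of this diff) of the key scheme these constants support: Concat joins its arguments with the ":" Separator, so a per-bucket limitation key looks like bucket:action:limitType, while config Actions entries use action:limitType. The literal "Read" below is an assumption standing in for ACTION_READ.

package main

import (
	"fmt"
	"strings"
)

// concat mirrors s3_constants.Concat, for illustration only.
func concat(elements ...string) string {
	return strings.Join(elements, ":")
}

func main() {
	// Key shape used by CircuitBreaker.loadCounterAndCompare for a per-bucket limit:
	fmt.Println(concat("my-bucket", "Read", "Count")) // my-bucket:Read:Count
	// Key shape used in the config Actions maps (see the test file further down):
	fmt.Println(concat("Read", "MB")) // Read:MB
}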
weed/s3api/s3api_circuit_breaker.go (new file, 182 lines)
@@ -0,0 +1,182 @@
package s3api

import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	"github.com/gorilla/mux"
	"net/http"
	"sync"
	"sync/atomic"
)

type CircuitBreaker struct {
	sync.Mutex
	Enabled     bool
	counters    map[string]*int64
	limitations map[string]int64
}

func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {
	cb := &CircuitBreaker{
		counters:    make(map[string]*int64),
		limitations: make(map[string]int64),
	}

	err := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile)
		if err != nil {
			return fmt.Errorf("read S3 circuit breaker config: %v", err)
		}
		return cb.LoadS3ApiConfigurationFromBytes(content)
	})

	if err != nil {
		glog.Warningf("fail to load config: %v", err)
	}

	return cb
}

func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error {
	cbCfg := &s3_pb.S3CircuitBreakerConfig{}
	if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil {
		glog.Warningf("unmarshal error: %v", err)
		return fmt.Errorf("unmarshal error: %v", err)
	}
	if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil {
		return err
	}
	return nil
}

func (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error {

	//global
	globalEnabled := false
	globalOptions := cfg.Global
	limitations := make(map[string]int64)
	if globalOptions != nil && globalOptions.Enabled && len(globalOptions.Actions) > 0 {
		globalEnabled = globalOptions.Enabled
		for action, limit := range globalOptions.Actions {
			limitations[action] = limit
		}
	}
	cb.Enabled = globalEnabled

	//buckets
	for bucket, cbOptions := range cfg.Buckets {
		if cbOptions.Enabled {
			for action, limit := range cbOptions.Actions {
				limitations[s3_constants.Concat(bucket, action)] = limit
			}
		}
	}

	cb.limitations = limitations
	return nil
}

func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {
	return func(w http.ResponseWriter, r *http.Request) {
		if !cb.Enabled {
			f(w, r)
			return
		}

		vars := mux.Vars(r)
		bucket := vars["bucket"]

		rollback, errCode := cb.limit(r, bucket, action)
		defer func() {
			for _, rf := range rollback {
				rf()
			}
		}()

		if errCode == s3err.ErrNone {
			f(w, r)
			return
		}
		s3err.WriteErrorResponse(w, r, errCode)
	}, Action(action)
}

func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {

	//bucket simultaneous request count
	bucketCountRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest)
	if bucketCountRollBack != nil {
		rollback = append(rollback, bucketCountRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}

	//bucket simultaneous request content bytes
	bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
	if bucketContentLengthRollBack != nil {
		rollback = append(rollback, bucketContentLengthRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}

	//global simultaneous request count
	globalCountRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest)
	if globalCountRollBack != nil {
		rollback = append(rollback, globalCountRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}

	//global simultaneous request content bytes
	globalContentLengthRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
	if globalContentLengthRollBack != nil {
		rollback = append(rollback, globalContentLengthRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}
	return
}

func (cb *CircuitBreaker) loadCounterAndCompare(bucket, action, limitType string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {
	key := s3_constants.Concat(bucket, action, limitType)
	e = s3err.ErrNone
	if max, ok := cb.limitations[key]; ok {
		counter, exists := cb.counters[key]
		if !exists {
			cb.Lock()
			counter, exists = cb.counters[key]
			if !exists {
				var newCounter int64
				counter = &newCounter
				cb.counters[key] = counter
			}
			cb.Unlock()
		}
		current := atomic.LoadInt64(counter)
		if current+inc > max {
			e = errCode
			return
		} else {
			current := atomic.AddInt64(counter, inc)
			f = func() {
				atomic.AddInt64(counter, -inc)
			}
			current = atomic.LoadInt64(counter)
			if current > max {
				e = errCode
				return
			}
		}
	}
	return
}
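Not part of this diff: a hypothetical helper sketching the configuration shape loadCircuitBreakerConfig expects, mirroring how the test file below builds it. The bucket name "images" and the limit values are made up; the message and constant names come from the code above.

package s3api

import (
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
)

// exampleCircuitBreaker builds a CircuitBreaker with a global cap of 100
// concurrent Read requests and a 64 MiB (expressed in bytes, compared against
// ContentLength) in-flight byte cap for bucket "images".
func exampleCircuitBreaker() *CircuitBreaker {
	cfg := &s3_pb.S3CircuitBreakerConfig{
		Global: &s3_pb.S3CircuitBreakerOptions{
			Enabled: true,
			Actions: map[string]int64{
				s3_constants.Concat(s3_constants.ACTION_READ, s3_constants.LimitTypeCount): 100,
			},
		},
		Buckets: map[string]*s3_pb.S3CircuitBreakerOptions{
			"images": {
				Enabled: true,
				Actions: map[string]int64{
					s3_constants.Concat(s3_constants.ACTION_READ, s3_constants.LimitTypeBytes): 64 * 1024 * 1024,
				},
			},
		},
	}
	cb := &CircuitBreaker{
		counters:    make(map[string]*int64),
		limitations: make(map[string]int64),
	}
	_ = cb.loadCircuitBreakerConfig(cfg) // fills cb.limitations and sets cb.Enabled
	return cb
}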
weed/s3api/s3api_circuit_breaker_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package s3api

import (
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	"net/http"
	"sync"
	"sync/atomic"
	"testing"
)

type TestLimitCase struct {
	actionName       string
	limitType        string
	bucketLimitValue int64
	globalLimitValue int64

	routineCount int
	reqBytes     int64

	successCount int64
}

var (
	bucket         = "/test"
	action         = s3_constants.ACTION_READ
	TestLimitCases = []*TestLimitCase{
		{action, s3_constants.LimitTypeCount, 5, 5, 6, 1024, 5},
		{action, s3_constants.LimitTypeCount, 6, 6, 6, 1024, 6},
		{action, s3_constants.LimitTypeCount, 5, 6, 6, 1024, 5},
		{action, s3_constants.LimitTypeBytes, 1024, 1024, 6, 200, 5},
		{action, s3_constants.LimitTypeBytes, 1200, 1200, 6, 200, 6},
		{action, s3_constants.LimitTypeBytes, 11990, 11990, 60, 200, 59},
		{action, s3_constants.LimitTypeBytes, 11790, 11990, 70, 200, 58},
	}
)

func TestLimit(t *testing.T) {
	for _, tc := range TestLimitCases {
		circuitBreakerConfig := &s3_pb.S3CircuitBreakerConfig{
			Global: &s3_pb.S3CircuitBreakerOptions{
				Enabled: true,
				Actions: map[string]int64{
					s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue,
					s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue,
				},
			},
			Buckets: map[string]*s3_pb.S3CircuitBreakerOptions{
				bucket: {
					Enabled: true,
					Actions: map[string]int64{
						s3_constants.Concat(tc.actionName, tc.limitType): tc.bucketLimitValue,
					},
				},
			},
		}
		circuitBreaker := &CircuitBreaker{
			counters:    make(map[string]*int64),
			limitations: make(map[string]int64),
		}
		err := circuitBreaker.loadCircuitBreakerConfig(circuitBreakerConfig)
		if err != nil {
			t.Fatal(err)
		}

		successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: tc.reqBytes})
		if successCount != tc.successCount {
			t.Errorf("successCount not equal, expect=%d, actual=%d", tc.successCount, successCount)
		}
	}
}

func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request) int64 {
	var successCounter int64
	resultCh := make(chan []func(), routineCount)
	var wg sync.WaitGroup
	for i := 0; i < routineCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rollbackFn, errCode := circuitBreaker.limit(r, bucket, action)
			if errCode == s3err.ErrNone {
				atomic.AddInt64(&successCounter, 1)
			}
			resultCh <- rollbackFn
		}()
	}
	wg.Wait()
	close(resultCh)
	for fns := range resultCh {
		for _, fn := range fns {
			fn()
		}
	}
	return successCounter
}
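A reading aid for the byte-limit rows above (illustrative arithmetic, not part of this diff): each admitted request adds its ContentLength to the shared counter, and no rollback runs until all goroutines finish, so the expected success count is simply how many 200-byte requests fit under the configured limit.

package main

import "fmt"

// For the row {LimitTypeBytes, 11990, 11990, 60, 200, 59}: 59*200 = 11800 stays
// within the 11990-byte limit, while a 60th request would push the counter to
// 12000 and be rejected with ErrRequestBytesExceed.
func main() {
	limit, reqBytes := int64(11990), int64(200)
	fmt.Println(limit / reqBytes) // 59 admissible concurrent requests
}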
@@ -4,16 +4,17 @@ import (
	"crypto/sha1"
	"encoding/xml"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	weed_server "github.com/chrislusf/seaweedfs/weed/server"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	weed_server "github.com/chrislusf/seaweedfs/weed/server"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)
@@ -119,7 +120,9 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
	glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)))

	writeSuccessResponseXML(w, r, response)
	//https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
	s3err.WriteXMLResponse(w, r, http.StatusNoContent, response)
	s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone)

}
@@ -3,13 +3,13 @@ package s3api
import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"net"
	"net/http"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb"
	. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
@@ -35,6 +35,7 @@ type S3ApiServer struct {
	s3_pb.UnimplementedSeaweedS3Server
	option         *S3ApiServerOption
	iam            *IdentityAccessManagement
	cb             *CircuitBreaker
	randomClientId int32
	filerGuard     *security.Guard
	client         *http.Client
@@ -55,6 +56,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer
		iam:            NewIdentityAccessManagement(option),
		randomClientId: util.RandomInt32(),
		filerGuard:     security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec),
		cb:             NewCircuitBreaker(option),
	}
	if option.LocalFilerSocket == nil || *option.LocalFilerSocket == "" {
		s3ApiServer.client = &http.Client{Transport: &http.Transport{
@@ -73,7 +75,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer
	s3ApiServer.registerRouter(router)

	go s3ApiServer.subscribeMetaEvents("s3", filer.IamConfigDirecotry+"/"+filer.IamIdentityFile, time.Now().UnixNano())
	go s3ApiServer.subscribeMetaEvents("s3", filer.DirectoryEtcRoot, time.Now().UnixNano())
	return s3ApiServer, nil
}
@@ -107,115 +109,115 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
	// objects with query

	// CopyObjectPart
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// PutObjectPart
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// CompleteMultipartUpload
	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}")
	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
	// NewMultipartUpload
	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "")
	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
	// AbortMultipartUpload
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}")
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
	// ListObjectParts
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}")
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
	// ListMultipartUploads
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")

	// GetObjectTagging
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "")
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")
	// PutObjectTagging
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "")
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")
	// DeleteObjectTagging
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "")
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")

	// PutObjectACL
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "")
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
	// PutObjectRetention
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectRetentionHandler, ACTION_WRITE), "PUT")).Queries("retention", "")
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
	// PutObjectLegalHold
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLegalHoldHandler, ACTION_WRITE), "PUT")).Queries("legal-hold", "")
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
	// PutObjectLockConfiguration
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE), "PUT")).Queries("object-lock", "")
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")

	// GetObjectACL
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectAclHandler, ACTION_READ), "GET")).Queries("acl", "")
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ)), "GET")).Queries("acl", "")

	// objects with query

	// raw objects

	// HeadObject
	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET"))

	// GetObject, but directory listing is not supported
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET"))

	// CopyObject
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY"))
	// PutObject
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT"))
	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT"))
	// DeleteObject
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE"))
	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE"))

	// raw objects

	// buckets with query

	// DeleteMultipleObjects
	bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")
	bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")

	// GetBucketACL
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketAclHandler, ACTION_READ), "GET")).Queries("acl", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
	// PutBucketACL
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "")
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")

	// GetBucketPolicy
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketPolicyHandler, ACTION_READ), "GET")).Queries("policy", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
	// PutBucketPolicy
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketPolicyHandler, ACTION_WRITE), "PUT")).Queries("policy", "")
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
	// DeleteBucketPolicy
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketPolicyHandler, ACTION_WRITE), "DELETE")).Queries("policy", "")
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")

	// GetBucketCors
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketCorsHandler, ACTION_READ), "GET")).Queries("cors", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "")
	// PutBucketCors
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketCorsHandler, ACTION_WRITE), "PUT")).Queries("cors", "")
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "")
	// DeleteBucketCors
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketCorsHandler, ACTION_WRITE), "DELETE")).Queries("cors", "")
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "")

	// GetBucketLifecycleConfiguration
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ), "GET")).Queries("lifecycle", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "")
	// PutBucketLifecycleConfiguration
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE), "PUT")).Queries("lifecycle", "")
	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "")
	// DeleteBucketLifecycleConfiguration
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE), "DELETE")).Queries("lifecycle", "")
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "")

	// GetBucketLocation
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketLocationHandler, ACTION_READ), "GET")).Queries("location", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "")

	// GetBucketRequestPayment
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketRequestPaymentHandler, ACTION_READ), "GET")).Queries("requestPayment", "")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "")

	// ListObjectsV2
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2")
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2")

	// buckets with query

	// raw buckets

	// PostPolicy
	bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))
	bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST"))

	// HeadBucket
	bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_READ), "GET"))
	bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET"))

	// PutBucket
	bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT"))
	// DeleteBucket
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))
	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_WRITE)), "DELETE"))

	// ListObjectsV1 (Legacy)
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST"))

	// raw buckets
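Not part of this diff: a hypothetical route registration spelling out the wrapping order used above. cb.Limit returns a (handler, action) pair, and Go spreads that two-valued call directly into iam.Auth's two parameters, so each route states its action only once; the exact Auth signature is assumed from its call sites here.

package s3api

import (
	. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/gorilla/mux"
)

// exampleRateLimitedRoute mirrors the GetObject route above.
func exampleRateLimitedRoute(s3a *S3ApiServer, bucket *mux.Router) {
	// Equivalent to:
	//   h, a := s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)
	//   bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(h, a), "GET"))
	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(
		track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET"))
}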
@@ -104,6 +104,9 @@ const (
	ErrExistingObjectIsDirectory
	ErrExistingObjectIsFile

	ErrTooManyRequest
	ErrRequestBytesExceed
)

// error code to APIError structure, these fields carry respective
@@ -401,6 +404,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Existing Object is a file.",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrTooManyRequest: {
		Code:           "ErrTooManyRequest",
		Description:    "Too many simultaneous request count",
		HTTPStatusCode: http.StatusTooManyRequests,
	},
	ErrRequestBytesExceed: {
		Code:           "ErrRequestBytesExceed",
		Description:    "Simultaneous request bytes exceed limitations",
		HTTPStatusCode: http.StatusTooManyRequests,
	},
}

// GetAPIError provides API Error for input API error code.