Merge branch 'master' into mq-subscribe
@@ -52,7 +52,7 @@ func main() {
 	})

 	subscriber.SetCompletionFunc(func() {
-		glog.V(0).Infof("done recived %d messages", counter)
+		glog.V(0).Infof("done received %d messages", counter)
 	})

 	if err := subscriber.Subscribe(); err != nil {
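The hunk above fixes the wording of the completion log in the subscriber example. The surrounding pattern, a shared counter incremented by the per-message callback and reported once in SetCompletionFunc, is sketched below in a self-contained form; the fakeSubscriber type is a hypothetical stand-in, not the real MQ client API.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// fakeSubscriber is a hypothetical stand-in for the real subscriber client;
// only the callback wiring matters for this sketch.
type fakeSubscriber struct {
	onMessage  func(payload []byte)
	onComplete func()
}

func (s *fakeSubscriber) run(messages [][]byte) {
	for _, m := range messages {
		s.onMessage(m)
	}
	s.onComplete()
}

func main() {
	var counter int64

	sub := &fakeSubscriber{}
	sub.onMessage = func(_ []byte) {
		// message callbacks may run concurrently, so count atomically
		atomic.AddInt64(&counter, 1)
	}
	sub.onComplete = func() {
		fmt.Printf("done received %d messages\n", atomic.LoadInt64(&counter))
	}

	sub.run([][]byte{[]byte("a"), []byte("b"), []byte("c")})
}
```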
@@ -103,7 +103,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 	if len(entries) == 0 {
 		entryName, dirName := s3a.getEntryNameAndDir(input)
 		if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil {
-			if uploadId, ok := entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id]; ok && *input.UploadId == string(uploadId) {
+			if uploadId, ok := entry.Extended[s3_constants.SeaweedFSUploadId]; ok && *input.UploadId == string(uploadId) {
 				return &CompleteMultipartUploadResult{
 					CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
 						Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
@@ -222,7 +222,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 	if entry.Extended == nil {
 		entry.Extended = make(map[string][]byte)
 	}
-	entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id] = []byte(*input.UploadId)
+	entry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId)
 	for k, v := range pentry.Extended {
 		if k != "key" {
 			entry.Extended[k] = v
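The two hunks above store the upload id on the completed entry under s3_constants.SeaweedFSUploadId and check it when completeMultipartUpload runs again for the same upload, so a retried CompleteMultipartUpload can be answered from the already-completed object instead of failing. A rough client-side sketch of that retry scenario using aws-sdk-go v1; the endpoint, bucket, key, and upload id are placeholders (8333 is the default port of `weed s3`).

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"), // assumed local S3 gateway
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	uploadId := "example-upload-id" // would come from CreateMultipartUpload
	var parts []*s3.CompletedPart   // would come from the UploadPart responses

	// A client that times out and retries may send CompleteMultipartUpload twice.
	// With the upload id persisted on the entry, the second attempt can return the
	// same Location instead of a NoSuchUpload-style error.
	for attempt := 1; attempt <= 2; attempt++ {
		out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
			Bucket:          aws.String("test-bucket"),
			Key:             aws.String("large-object"),
			UploadId:        aws.String(uploadId),
			MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
		})
		if err != nil {
			log.Printf("attempt %d failed: %v", attempt, err)
			continue
		}
		log.Printf("attempt %d location: %s", attempt, aws.StringValue(out.Location))
	}
}
```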
@@ -38,8 +38,9 @@ const (
 	AmzObjectTaggingDirective = "X-Amz-Tagging-Directive"
 	AmzTagCount = "x-amz-tagging-count"

-	X_SeaweedFS_Header_Directory_Key = "x-seaweedfs-is-directory-key"
-	X_SeaweedFS_Header_Upload_Id = "X-Seaweedfs-Upload-Id"
+	SeaweedFSIsDirectoryKey = "X-Seaweedfs-Is-Directory-Key"
+	SeaweedFSPartNumber = "X-Seaweedfs-Part-Number"
+	SeaweedFSUploadId = "X-Seaweedfs-Upload-Id"

 	// S3 ACL headers
 	AmzCannedAcl = "X-Amz-Acl"
@@ -48,6 +49,8 @@ const (
 	AmzAclWrite = "X-Amz-Grant-Write"
 	AmzAclReadAcp = "X-Amz-Grant-Read-Acp"
 	AmzAclWriteAcp = "X-Amz-Grant-Write-Acp"
+
+	AmzMpPartsCount = "X-Amz-Mp-Parts-Count"
 )

 // Non-Standard S3 HTTP request constants
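Taken together, the renamed and added constants spell out the per-part read contract: the gateway forwards a request's partNumber to the filer as X-Seaweedfs-Part-Number, and the total number of parts comes back to the S3 client as X-Amz-Mp-Parts-Count. From the client side this is the ordinary GetObject-with-PartNumber call; a hedged sketch with aws-sdk-go v1 (endpoint, bucket, and key are placeholders).

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"), // assumed local S3 gateway
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// Fetch only part 2 of a multipart object. The response carries that part's
	// ETag and the total parts count (surfaced via the X-Amz-Mp-Parts-Count header).
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:     aws.String("test-bucket"),
		Key:        aws.String("large-object"),
		PartNumber: aws.Int64(2),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	fmt.Println("part etag:", aws.StringValue(out.ETag))
	fmt.Println("parts count:", aws.Int64Value(out.PartsCount))
}
```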
@@ -370,6 +370,9 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 		if _, ok := s3_constants.PassThroughHeaders[strings.ToLower(k)]; ok {
 			proxyReq.Header[k] = v
 		}
+		if k == "partNumber" {
+			proxyReq.Header[s3_constants.SeaweedFSPartNumber] = v
+		}
 	}
 	for header, values := range r.Header {
 		proxyReq.Header[header] = values
@@ -411,7 +414,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	}

 	TimeToFirstByte(r.Method, start, r)
-	if resp.Header.Get(s3_constants.X_SeaweedFS_Header_Directory_Key) == "true" {
+	if resp.Header.Get(s3_constants.SeaweedFSIsDirectoryKey) == "true" {
 		responseStatusCode := responseFn(resp, w)
 		s3err.PostLog(r, responseStatusCode, s3err.ErrNone)
 		return
@@ -429,6 +432,18 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 		return
 	}

+	if resp.StatusCode == http.StatusBadRequest {
+		resp_body, _ := io.ReadAll(resp.Body)
+		switch string(resp_body) {
+		case "InvalidPart":
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
+		default:
+			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
+		}
+		resp.Body.Close()
+		return
+	}
+
 	setUserMetadataKeyToLowercase(resp)

 	responseStatusCode := responseFn(resp, w)
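The added StatusBadRequest block maps the filer's plain-text error body onto S3-style errors: "InvalidPart" (returned by the filer when the requested part number exceeds the chunk count, see the read handler change further down) becomes s3err.ErrInvalidPart, and anything else becomes ErrInvalidRequest. A client asking for a nonexistent part would therefore see roughly the following; sketch with aws-sdk-go v1, and the exact error code string is assumed to follow s3err.ErrInvalidPart.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"), // assumed local S3 gateway
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// Request a part number far beyond the object's real parts count.
	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:     aws.String("test-bucket"),
		Key:        aws.String("large-object"),
		PartNumber: aws.Int64(9999),
	})
	if aerr, ok := err.(awserr.Error); ok {
		// The gateway translates the filer's "InvalidPart" body into its
		// ErrInvalidPart response, so an InvalidPart-style code is expected here.
		fmt.Println("error code:", aerr.Code())
		return
	}
	log.Println("unexpected result:", err)
}
```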
@@ -177,7 +177,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 	fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete")
 	v.SetDefault("filer.options.buckets_folder", "/buckets")
 	fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder")
-	// TODO deprecated, will be be removed after 2020-12-31
+	// TODO deprecated, will be removed after 2020-12-31
 	// replaced by https://github.com/seaweedfs/seaweedfs/wiki/Path-Specific-Configuration
 	// fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync")
 	isFresh := fs.filer.LoadConfiguration(v)
@@ -3,6 +3,8 @@ package weed_server
 import (
 	"bytes"
 	"context"
+	"encoding/base64"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"io"
@@ -132,7 +134,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 			return
 		}
 		// inform S3 API this is a user created directory key object
-		w.Header().Set(s3_constants.X_SeaweedFS_Header_Directory_Key, "true")
+		w.Header().Set(s3_constants.SeaweedFSIsDirectoryKey, "true")
 	}

 	if isForDirectory && entry.Attr.Mime != s3_constants.FolderMimeType {
@@ -158,7 +160,22 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	etag := filer.ETagEntry(entry)
+	var etag string
+	if partNumber, errNum := strconv.Atoi(r.Header.Get(s3_constants.SeaweedFSPartNumber)); errNum == nil {
+		if len(entry.Chunks) < partNumber {
+			stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadChunk).Inc()
+			w.WriteHeader(http.StatusBadRequest)
+			w.Write([]byte("InvalidPart"))
+			return
+		}
+		w.Header().Set(s3_constants.AmzMpPartsCount, strconv.Itoa(len(entry.Chunks)))
+		partChunk := entry.GetChunks()[partNumber-1]
+		md5, _ := base64.StdEncoding.DecodeString(partChunk.ETag)
+		etag = hex.EncodeToString(md5)
+		r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", partChunk.Offset, uint64(partChunk.Offset)+partChunk.Size-1))
+	} else {
+		etag = filer.ETagEntry(entry)
+	}
 	w.Header().Set("Accept-Ranges", "bytes")

 	// mime type
@@ -207,7 +224,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)

 	filename := entry.Name()
 	adjustPassthroughHeaders(w, r, filename)
-
 	totalSize := int64(entry.Size())

 	if r.Method == "HEAD" {
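The mechanics of the new per-part branch above: a chunk's ETag is stored as standard base64 of its 16-byte MD5, so the handler base64-decodes and hex-encodes it to report the S3-style part ETag, and it narrows the read to the chosen chunk by synthesizing a Range header from the chunk's offset and size. A standalone sketch of both conversions; the sample ETag and sizes are placeholders.

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// A chunk ETag as stored on the entry: standard base64 of the 16-byte MD5.
	// (This placeholder value is the MD5 of an empty body.)
	storedETag := "1B2M2Y8AsgTpgAmY7PhCfg=="

	md5Bytes, err := base64.StdEncoding.DecodeString(storedETag)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("part etag:", hex.EncodeToString(md5Bytes)) // d41d8cd98f00b204e9800998ecf8427e

	// The byte range for the selected part is derived from the chunk's offset and
	// size, mirroring the fmt.Sprintf("bytes=%d-%d", ...) call in the handler.
	var offset int64 = 8 << 20 // placeholder: the part starts at 8 MiB
	var size uint64 = 8 << 20  // placeholder: an 8 MiB part
	fmt.Printf("Range: bytes=%d-%d\n", offset, uint64(offset)+size-1)
}
```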
@@ -97,7 +97,7 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque
 		}
 	}()

-	// processs uploads
+	// processes uploads
 	stats.WriteRequest()
 	vs.guard.WhiteList(vs.PostHandler)(w, r)

@@ -1,10 +1,10 @@
 package shell

 import (
-	"testing"
-
+	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"github.com/stretchr/testify/assert"
+	"testing"

 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -271,3 +271,18 @@ func TestVolumeSelection(t *testing.T) {
 	assert.Equal(t, 378, len(vids))

 }
+
+func TestDeleteEmptySelection(t *testing.T) {
+	topologyInfo := parseOutput(topoData)
+
+	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+		for _, diskInfo := range dn.DiskInfos {
+			for _, v := range diskInfo.VolumeInfos {
+				if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
+					fmt.Printf("empty volume %d from %s\n", v.Id, dn.Id)
+				}
+			}
+		}
+	})
+
+}
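The new test walks the parsed topology and prints volumes that hold no data beyond the superblock (Size <= super_block.SuperBlockSize) yet have been written at some point (ModifiedAtSecond > 0). A small variation, assuming the same parseOutput/eachDataNode helpers and topoData fixture from this test file, collects the candidate volume ids instead of printing them so a later assertion or cleanup step could act on the list.

```go
func TestCollectEmptyVolumes(t *testing.T) {
	topologyInfo := parseOutput(topoData)

	var emptyVolumeIds []uint32
	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, diskInfo := range dn.DiskInfos {
			for _, v := range diskInfo.VolumeInfos {
				if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
					emptyVolumeIds = append(emptyVolumeIds, v.Id)
				}
			}
		}
	})

	fmt.Printf("found %d empty volumes\n", len(emptyVolumeIds))
}
```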
@@ -41,6 +41,7 @@ const (
 	ErrorWriteEntry = "write.entry.failed"
 	RepeatErrorUploadContent = "upload.content.repeat.failed"
 	ErrorChunkAssign = "chunkAssign.failed"
+	ErrorReadChunk = "read.chunk.failed"
 	ErrorReadCache = "read.cache.failed"
 	ErrorReadStream = "read.stream.failed"

@@ -7,7 +7,7 @@ import (
 const HttpStatusCancelled = 499

 var (
-	VERSION_NUMBER = fmt.Sprintf("%.02f", 3.64)
+	VERSION_NUMBER = fmt.Sprintf("%.02f", 3.65)
 	VERSION = sizeLimit + " " + VERSION_NUMBER
 	COMMIT = ""
 )