seaweedFS/weed/server/filer_server_handlers_write_upload.go
Chris Lu 995dfc4d5d chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
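
For illustration, a minimal, self-contained sketch of this failure mode and the reset. The memoryStore type, its Reset() and LookupAnonymous() signatures, and the test body below are assumed stand-ins shaped after the commit message, not the real credential package API.

    package credential_test

    import (
        "sync"
        "testing"
    )

    // memoryStore stands in for the singleton MemoryStore credential backend.
    type memoryStore struct {
        mu         sync.Mutex
        identities map[string]struct{}
    }

    // Reset clears anything left behind by earlier tests.
    func (s *memoryStore) Reset() {
        s.mu.Lock()
        defer s.mu.Unlock()
        s.identities = map[string]struct{}{}
    }

    // LookupAnonymous reports whether an anonymous identity is present.
    func (s *memoryStore) LookupAnonymous() bool {
        s.mu.Lock()
        defer s.mu.Unlock()
        _, found := s.identities["anonymous"]
        return found
    }

    // The package-level singleton; the real store is registered once via init(),
    // so state from earlier tests leaks into later ones unless it is reset.
    var sharedStore = &memoryStore{identities: map[string]struct{}{}}

    func TestEmptyConfigWithFallbackKey(t *testing.T) {
        sharedStore.Reset() // start from a clean store regardless of test order
        if sharedStore.LookupAnonymous() {
            t.Fatal("expected no anonymous identity in a fresh store")
        }
    }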

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
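
Roughly, the pattern looks like the sketch below. The streamSession, outgoing, done, close, and sendToWorker names follow the commit message, but the Message placeholder, the buffer size, and the error value are assumptions, not the actual plugin worker API. For simplicity this variant closes only done and leaves outgoing open, so a concurrent send can never hit a closed channel.

    package worker

    import (
        "errors"
        "sync"
    )

    // Message is a placeholder for whatever the plugin protocol actually carries.
    type Message struct {
        Payload []byte
    }

    var errSessionClosed = errors.New("worker session closed")

    type streamSession struct {
        outgoing  chan *Message // messages queued for the worker stream
        done      chan struct{} // closed when the session shuts down
        closeOnce sync.Once
    }

    func newStreamSession() *streamSession {
        return &streamSession{
            outgoing: make(chan *Message, 16),
            done:     make(chan struct{}),
        }
    }

    // close signals shutdown. In this simplified sketch only done is closed;
    // outgoing is left open, so senders cannot panic on a closed channel.
    func (s *streamSession) close() {
        s.closeOnce.Do(func() {
            close(s.done)
        })
    }

    // sendToWorker checks done in the same select as the send, so a closed
    // session surfaces as an error instead of a "send on closed channel" panic.
    func (s *streamSession) sendToWorker(m *Message) error {
        select {
        case <-s.done:
            return errSessionClosed
        case s.outgoing <- m:
            return nil
        }
    }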
2026-04-03 16:04:27 -07:00


package weed_server

import (
    "bytes"
    "context"
    "crypto/md5"
    "encoding/base64"
    "fmt"
    "hash"
    "io"
    "net/http"
    "slices"
    "strconv"
    "sync"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/operation"
    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/security"
    "github.com/seaweedfs/seaweedfs/weed/stats"
    "github.com/seaweedfs/seaweedfs/weed/util"
)

var bufPool = sync.Pool{
    New: func() interface{} {
        return new(bytes.Buffer)
    },
}

func (fs *FilerServer) uploadRequestToChunks(ctx context.Context, w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
    query := r.URL.Query()

    isAppend := isAppend(r)
    if query.Has("offset") {
        offset := query.Get("offset")
        offsetInt, err := strconv.ParseInt(offset, 10, 64)
        if err != nil || offsetInt < 0 {
            err = fmt.Errorf("invalid 'offset': '%s'", offset)
            return nil, nil, 0, err, nil
        }
        if isAppend && offsetInt > 0 {
            err = fmt.Errorf("cannot set offset when op=append")
            return nil, nil, 0, err, nil
        }
        chunkOffset = offsetInt
    }

    return fs.uploadReaderToChunks(ctx, r, reader, chunkOffset, chunkSize, fileName, contentType, isAppend, so)
}

func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, r *http.Request, reader io.Reader, startOffset int64, chunkSize int32, fileName, contentType string, isAppend bool, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
    md5Hash = md5.New()
    chunkOffset = startOffset
    var partReader = io.NopCloser(io.TeeReader(reader, md5Hash))

    var wg sync.WaitGroup
    var bytesBufferCounter int64 = 4
    bytesBufferLimitChan := make(chan struct{}, bytesBufferCounter)
    var fileChunksLock sync.Mutex
    var uploadErrLock sync.Mutex
    for {
        // need to throttle used byte buffer
        bytesBufferLimitChan <- struct{}{}

        // As long as there is an error in the upload of one chunk, it can be terminated early
        // uploadErr may be modified in other go routines, lock is needed to avoid race condition
        uploadErrLock.Lock()
        if uploadErr != nil {
            <-bytesBufferLimitChan
            uploadErrLock.Unlock()
            break
        }
        uploadErrLock.Unlock()

        bytesBuffer := bufPool.Get().(*bytes.Buffer)

        limitedReader := io.LimitReader(partReader, int64(chunkSize))

        bytesBuffer.Reset()

        dataSize, err := bytesBuffer.ReadFrom(limitedReader)

        // data, err := io.ReadAll(limitedReader)
        if err != nil || dataSize == 0 {
            bufPool.Put(bytesBuffer)
            <-bytesBufferLimitChan
            if err != nil {
                uploadErrLock.Lock()
                if uploadErr == nil {
                    uploadErr = err
                }
                uploadErrLock.Unlock()
            }
            break
        }
        if chunkOffset == 0 && !isAppend {
            if dataSize < fs.option.SaveToFilerLimit {
                chunkOffset += dataSize
                smallContent = make([]byte, dataSize)
                bytesBuffer.Read(smallContent)
                bufPool.Put(bytesBuffer)
                <-bytesBufferLimitChan
                stats.FilerHandlerCounter.WithLabelValues(stats.ContentSaveToFiler).Inc()
                break
            }
        } else {
            stats.FilerHandlerCounter.WithLabelValues(stats.AutoChunk).Inc()
        }

        wg.Add(1)
        go func(offset int64, buf *bytes.Buffer) {
            defer func() {
                bufPool.Put(buf)
                <-bytesBufferLimitChan
                wg.Done()
            }()

            chunks, toChunkErr := fs.dataToChunkWithSSE(ctx, r, fileName, contentType, buf.Bytes(), offset, so)
            if toChunkErr != nil {
                uploadErrLock.Lock()
                if uploadErr == nil {
                    uploadErr = toChunkErr
                }
                uploadErrLock.Unlock()
            }
            if chunks != nil {
                fileChunksLock.Lock()
                for _, chunk := range chunks {
                    fileChunks = append(fileChunks, chunk)
                }
                fileChunksLock.Unlock()
            }
        }(chunkOffset, bytesBuffer)

        // reset variables for the next chunk
        glog.V(4).Infof("uploadReaderToChunks read chunk at offset %d, size %d", chunkOffset, dataSize)
        chunkOffset = chunkOffset + dataSize

        // if last chunk was not at full chunk size, but already exhausted the reader
        if dataSize < int64(chunkSize) {
            break
        }
    }

    wg.Wait()

    if uploadErr != nil {
        glog.V(0).InfofCtx(ctx, "upload file %s error: %v", fileName, uploadErr)
        for _, chunk := range fileChunks {
            glog.V(4).InfofCtx(ctx, "purging failed uploaded %s chunk %s [%d,%d)", fileName, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
        }
        fs.filer.DeleteUncommittedChunks(ctx, fileChunks)
        return nil, md5Hash, 0, uploadErr, nil
    }
    slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) int {
        return int(a.Offset - b.Offset)
    })
    return fileChunks, md5Hash, chunkOffset, nil, smallContent
}

func (fs *FilerServer) doUpload(ctx context.Context, urlLocation string, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt, contentMd5 string) (*operation.UploadResult, error, []byte) {

    stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUpload).Inc()
    start := time.Now()
    defer func() {
        stats.FilerRequestHistogram.WithLabelValues(stats.ChunkUpload).Observe(time.Since(start).Seconds())
    }()

    uploadOption := &operation.UploadOption{
        UploadUrl:         urlLocation,
        Filename:          fileName,
        Cipher:            fs.option.Cipher,
        IsInputCompressed: false,
        MimeType:          contentType,
        PairMap:           pairMap,
        Jwt:               auth,
        Md5:               contentMd5,
    }

    uploader, err := operation.NewUploader()
    if err != nil {
        return nil, err, []byte{}
    }

    // Use a context that ignores cancellation from the request context
    uploadCtx := context.WithoutCancel(ctx)
    uploadResult, err, data := uploader.Upload(uploadCtx, limitedReader, uploadOption)
    if uploadResult != nil && uploadResult.RetryCount > 0 {
        stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUploadRetry).Add(float64(uploadResult.RetryCount))
    }
    return uploadResult, err, data
}

func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) {
    dataReader := util.NewBytesReader(data)

    // retry to assign a different file id
    var fileId, urlLocation string
    var auth security.EncodedJwt
    var uploadErr error
    var uploadResult *operation.UploadResult
    var failedFileChunks []*filer_pb.FileChunk

    err := util.Retry("filerDataToChunk", func() error {
        // assign one file id for one chunk
        fileId, urlLocation, auth, uploadErr = fs.assignNewFileInfo(ctx, so)
        if uploadErr != nil {
            glog.V(4).InfofCtx(ctx, "retry later due to assign error: %v", uploadErr)
            stats.FilerHandlerCounter.WithLabelValues(stats.ChunkAssignRetry).Inc()
            return uploadErr
        }

        chunkMd5 := md5.Sum(data)
        chunkMd5B64 := base64.StdEncoding.EncodeToString(chunkMd5[:])

        // upload the chunk to the volume server
        uploadResult, uploadErr, _ = fs.doUpload(ctx, urlLocation, dataReader, fileName, contentType, nil, auth, chunkMd5B64)
        if uploadErr != nil {
            glog.V(4).InfofCtx(ctx, "retry later due to upload error: %v", uploadErr)
            stats.FilerHandlerCounter.WithLabelValues(stats.ChunkDoUploadRetry).Inc()
            fid, _ := filer_pb.ToFileIdObject(fileId)
            fileChunk := filer_pb.FileChunk{
                FileId: fileId,
                Offset: chunkOffset,
                Fid:    fid,
            }
            failedFileChunks = append(failedFileChunks, &fileChunk)
            return uploadErr
        }
        return nil
    })
    if err != nil {
        glog.ErrorfCtx(ctx, "upload error: %v", err)
        return failedFileChunks, err
    }

    // if last chunk exhausted the reader exactly at the border
    if uploadResult.Size == 0 {
        return nil, nil
    }

    // Extract SSE metadata from request headers if available
    var sseType filer_pb.SSEType = filer_pb.SSEType_NONE
    var sseMetadata []byte

    // Create chunk with SSE metadata if available
    var chunk *filer_pb.FileChunk
    if sseType != filer_pb.SSEType_NONE {
        chunk = uploadResult.ToPbFileChunkWithSSE(fileId, chunkOffset, time.Now().UnixNano(), sseType, sseMetadata)
    } else {
        chunk = uploadResult.ToPbFileChunk(fileId, chunkOffset, time.Now().UnixNano())
    }

    return []*filer_pb.FileChunk{chunk}, nil
}