chore: remove ~50k lines of unreachable code (#8913)

* chore: remove unreachable code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages
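
For context, the commit message does not name the analyzer. Go's x/tools deadcode command produces exactly this kind of report, listing functions unreachable from any main entry point, so a pass along these lines is one plausible way the list was generated:

go run golang.org/x/tools/cmd/deadcode@latest ./...

Its -test flag additionally treats test functions as roots; that distinction is what the later "restore KMS functions used by integration tests" fix below is about.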

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
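
A minimal sketch of the pattern, assuming nothing beyond what the message states; memoryStore and sharedStore below are illustrative stand-ins, not the actual SeaweedFS types:

package credential

import (
	"sync"
	"testing"
)

// memoryStore mimics the shape of the shared singleton: a process-wide
// store registered once via init() and shared by every test.
type memoryStore struct {
	mu         sync.Mutex
	identities map[string]struct{}
}

// Reset drops all state so each test starts from an empty store.
func (m *memoryStore) Reset() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.identities = map[string]struct{}{}
}

var sharedStore = &memoryStore{identities: map[string]struct{}{}}

func TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey(t *testing.T) {
	// Reset the singleton so identities created by earlier tests cannot
	// make LookupAnonymous() spuriously return true.
	sharedStore.Reset()
	// ... load the IAM manager from an empty config and assert on the
	// fallback-key behavior ...
}

Because the singleton outlives each test, any test that reads it is order-dependent unless it resets the store first.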

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
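
A minimal sketch of the shape of the fix, with illustrative names rather than the exact plugin types. The sketch takes the simplest safe variant and never closes outgoing at all; the commit's version closes it immediately after done:

package plugin

import (
	"errors"
	"sync"
)

type workerMessage struct{}

type streamSession struct {
	outgoing chan *workerMessage
	done     chan struct{} // closed to signal the session is dead
	once     sync.Once
}

func (s *streamSession) close() {
	s.once.Do(func() {
		close(s.done) // senders that see this never touch outgoing
	})
}

type Plugin struct{}

func (p *Plugin) sendToWorker(s *streamSession, msg *workerMessage) error {
	select {
	case <-s.done: // closed session: return an error instead of panicking
		return errors.New("worker session closed")
	case s.outgoing <- msg:
		return nil
	}
}

Leaving a channel unclosed is safe in Go; an unreferenced channel is garbage collected whether or not it was ever closed.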
Author: Chris Lu
Date: 2026-04-03 16:04:27 -07:00
Committed by: GitHub
Parent: 8fad85aed7
Commit: 995dfc4d5d

264 changed files with 62 additions and 46027 deletions

View File

@@ -19,7 +19,6 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
 	"github.com/seaweedfs/seaweedfs/weed/util/request_id"
 	"github.com/seaweedfs/seaweedfs/weed/util/version"
 
 	"google.golang.org/grpc/metadata"
-	"github.com/seaweedfs/seaweedfs/weed/filer"
@@ -237,25 +236,6 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b
 	return
 }
-func statsHealthHandler(w http.ResponseWriter, r *http.Request) {
-	m := make(map[string]interface{})
-	m["Version"] = version.Version()
-	writeJsonQuiet(w, r, http.StatusOK, m)
-}
-
-func statsCounterHandler(w http.ResponseWriter, r *http.Request) {
-	m := make(map[string]interface{})
-	m["Version"] = version.Version()
-	m["Counters"] = serverStats
-	writeJsonQuiet(w, r, http.StatusOK, m)
-}
-
-func statsMemoryHandler(w http.ResponseWriter, r *http.Request) {
-	m := make(map[string]interface{})
-	m["Version"] = version.Version()
-	m["Memory"] = stats.MemStat()
-	writeJsonQuiet(w, r, http.StatusOK, m)
-}
 
 var StaticFS fs.FS
 
 func handleStaticResources(defaultMux *http.ServeMux) {

View File

@@ -5,7 +5,6 @@ import (
 	"sync"
 
 	"github.com/seaweedfs/seaweedfs/weed/glog"
-	"github.com/seaweedfs/seaweedfs/weed/security"
 	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 	"github.com/seaweedfs/seaweedfs/weed/util/mem"
 	"github.com/seaweedfs/seaweedfs/weed/util/request_id"
@@ -25,26 +24,6 @@ var (
 	proxySemaphores sync.Map // host -> chan struct{}
 )
-
-func (fs *FilerServer) maybeAddVolumeJwtAuthorization(r *http.Request, fileId string, isWrite bool) {
-	encodedJwt := fs.maybeGetVolumeJwtAuthorizationToken(fileId, isWrite)
-	if encodedJwt == "" {
-		return
-	}
-	r.Header.Set("Authorization", "BEARER "+string(encodedJwt))
-}
-
-func (fs *FilerServer) maybeGetVolumeJwtAuthorizationToken(fileId string, isWrite bool) string {
-	var encodedJwt security.EncodedJwt
-	if isWrite {
-		encodedJwt = security.GenJwtForVolumeServer(fs.volumeGuard.SigningKey, fs.volumeGuard.ExpiresAfterSec, fileId)
-	} else {
-		encodedJwt = security.GenJwtForVolumeServer(fs.volumeGuard.ReadSigningKey, fs.volumeGuard.ReadExpiresAfterSec, fileId)
-	}
-	return string(encodedJwt)
-}
 
 func acquireProxySemaphore(ctx context.Context, host string) error {
 	v, _ := proxySemaphores.LoadOrStore(host, make(chan struct{}, proxyReadConcurrencyPerVolumeServer))
 	sem := v.(chan struct{})

View File

@@ -1,107 +0,0 @@
-package weed_server
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"net/http"
-	"strings"
-	"time"
-
-	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
-	"github.com/seaweedfs/seaweedfs/weed/operation"
-	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
-	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
-	"github.com/seaweedfs/seaweedfs/weed/util"
-)
-
-// handling single chunk POST or PUT upload
-func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) {
-
-	fileId, urlLocation, auth, err := fs.assignNewFileInfo(ctx, so)
-
-	if err != nil || fileId == "" || urlLocation == "" {
-		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter)
-	}
-
-	glog.V(4).InfofCtx(ctx, "write %s to %v", r.URL.Path, urlLocation)
-
-	// Note: encrypt(gzip(data)), encrypt data first, then gzip
-	sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
-
-	bytesBuffer := bufPool.Get().(*bytes.Buffer)
-	defer bufPool.Put(bytesBuffer)
-
-	pu, err := needle.ParseUpload(r, sizeLimit, bytesBuffer)
-	uncompressedData := pu.Data
-	if pu.IsGzipped {
-		uncompressedData = pu.UncompressedData
-	}
-	if pu.MimeType == "" {
-		pu.MimeType = http.DetectContentType(uncompressedData)
-		// println("detect2 mimetype to", pu.MimeType)
-	}
-
-	uploadOption := &operation.UploadOption{
-		UploadUrl:         urlLocation,
-		Filename:          pu.FileName,
-		Cipher:            true,
-		IsInputCompressed: false,
-		MimeType:          pu.MimeType,
-		PairMap:           pu.PairMap,
-		Jwt:               auth,
-	}
-
-	uploader, uploaderErr := operation.NewUploader()
-	if uploaderErr != nil {
-		return nil, fmt.Errorf("uploader initialization error: %w", uploaderErr)
-	}
-
-	uploadResult, uploadError := uploader.UploadData(ctx, uncompressedData, uploadOption)
-	if uploadError != nil {
-		return nil, fmt.Errorf("upload to volume server: %w", uploadError)
-	}
-
-	// Save to chunk manifest structure
-	fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0, time.Now().UnixNano())}
-
-	// fmt.Printf("uploaded: %+v\n", uploadResult)
-
-	path := r.URL.Path
-	if strings.HasSuffix(path, "/") {
-		if pu.FileName != "" {
-			path += pu.FileName
-		}
-	}
-
-	entry := &filer.Entry{
-		FullPath: util.FullPath(path),
-		Attr: filer.Attr{
-			Mtime:  time.Now(),
-			Crtime: time.Now(),
-			Mode:   0660,
-			Uid:    OS_UID,
-			Gid:    OS_GID,
-			TtlSec: so.TtlSeconds,
-			Mime:   pu.MimeType,
-			Md5:    util.Base64Md5ToBytes(pu.ContentMd5),
-		},
-		Chunks: fileChunks,
-	}
-
-	filerResult = &FilerPostResult{
-		Name: pu.FileName,
-		Size: int64(pu.OriginalDataSize),
-	}
-
-	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false, so.MaxFileNameLength); dbErr != nil {
-		fs.filer.DeleteUncommittedChunks(ctx, entry.GetChunks())
-		err = dbErr
-		filerResult.Error = dbErr.Error()
-		return
-	}
-
-	return
-}

View File

@@ -196,10 +196,6 @@ func (fs *FilerServer) doUpload(ctx context.Context, urlLocation string, limited
 	return uploadResult, err, data
 }
-
-func (fs *FilerServer) dataToChunk(ctx context.Context, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) {
-	return fs.dataToChunkWithSSE(ctx, nil, fileName, contentType, data, chunkOffset, so)
-}
 
 func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) {
 	dataReader := util.NewBytesReader(data)

View File

@@ -697,8 +697,3 @@ func (s *PostgreSQLServer) cleanupIdleSessions() {
 		}
 	}
 }
-
-// GetAddress returns the server address
-func (s *PostgreSQLServer) GetAddress() string {
-	return fmt.Sprintf("%s:%d", s.config.Host, s.config.Port)
-}

View File

@@ -106,10 +106,6 @@ func (vs *VolumeServer) StopHeartbeat() (isAlreadyStopping bool) {
 	return false
 }
-
-func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader pb.ServerAddress, err error) {
-	return vs.doHeartbeatWithRetry(masterAddress, grpcDialOption, sleepInterval, 0)
-}
 
 func (vs *VolumeServer) doHeartbeatWithRetry(masterAddress pb.ServerAddress, grpcDialOption grpc.DialOption, sleepInterval time.Duration, duplicateRetryCount int) (newLeader pb.ServerAddress, err error) {
 	ctx, cancel := context.WithCancel(context.Background())

View File

@@ -50,19 +50,3 @@ func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
 	m["Volumes"] = vs.store.VolumeInfos()
 	writeJsonQuiet(w, r, http.StatusOK, m)
 }
-
-func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Server", "SeaweedFS Volume "+version.VERSION)
-	m := make(map[string]interface{})
-	m["Version"] = version.Version()
-	var ds []*volume_server_pb.DiskStatus
-	for _, loc := range vs.store.Locations {
-		if dir, e := filepath.Abs(loc.Directory); e == nil {
-			newDiskStatus := stats.NewDiskStatus(dir)
-			newDiskStatus.DiskType = loc.DiskType.String()
-			ds = append(ds, newDiskStatus)
-		}
-	}
-	m["DiskStatuses"] = ds
-	writeJsonQuiet(w, r, http.StatusOK, m)
-}

View File

@@ -160,11 +160,3 @@ func SetEtag(w http.ResponseWriter, etag string) {
 		}
 	}
 }
-
-func getEtag(resp *http.Response) (etag string) {
-	etag = resp.Header.Get("ETag")
-	if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
-		return etag[1 : len(etag)-1]
-	}
-	return
-}

View File

@@ -1,69 +0,0 @@
-package weed_server
-
-import (
-	"testing"
-
-	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
-	"github.com/seaweedfs/seaweedfs/weed/storage"
-)
-
-func TestMaintenanceMode(t *testing.T) {
-	testCases := []struct {
-		name         string
-		pb           *volume_server_pb.VolumeServerState
-		want         bool
-		wantCheckErr string
-	}{
-		{
-			name:         "non-initialized state",
-			pb:           nil,
-			want:         false,
-			wantCheckErr: "",
-		},
-		{
-			name: "maintenance mode disabled",
-			pb: &volume_server_pb.VolumeServerState{
-				Maintenance: false,
-			},
-			want:         false,
-			wantCheckErr: "",
-		},
-		{
-			name: "maintenance mode enabled",
-			pb: &volume_server_pb.VolumeServerState{
-				Maintenance: true,
-			},
-			want:         true,
-			wantCheckErr: "volume server test_1234 is in maintenance mode",
-		},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			vs := VolumeServer{
-				store: &storage.Store{
-					Id:    "test_1234",
-					State: storage.NewStateFromProto("/some/path.pb", tc.pb),
-				},
-			}
-			if got, want := vs.MaintenanceMode(), tc.want; got != want {
-				t.Errorf("MaintenanceMode() returned %v, want %v", got, want)
-			}
-			err, wantErrStr := vs.CheckMaintenanceMode(), tc.wantCheckErr
-			if err != nil {
-				if wantErrStr == "" {
-					t.Errorf("CheckMaintenanceMode() returned error %v, want nil", err)
-				}
-				if errStr := err.Error(); errStr != wantErrStr {
-					t.Errorf("CheckMaintenanceMode() returned error %q, want %q", errStr, wantErrStr)
-				}
-			} else {
-				if wantErrStr != "" {
-					t.Errorf("CheckMaintenanceMode() returned no error, want %q", wantErrStr)
-				}
-			}
-		})
-	}
-}