seaweedFS/weed/operation/assign_file_id.go
Chris Lu 995dfc4d5d chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
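
A minimal, self-contained sketch of the reset pattern; the package and
store names below are stand-ins, not the real credential API:

package credstore // hypothetical package; all names below are assumptions

import "testing"

// memoryStore stands in for the real MemoryStore singleton that the
// credential package registers via init().
type memoryStore struct{ identities map[string]bool }

var shared = &memoryStore{identities: map[string]bool{}}

// Reset drops all identities, returning the singleton to a clean state.
func (m *memoryStore) Reset() { m.identities = map[string]bool{} }

// LookupAnonymous reports whether an anonymous identity exists.
func (m *memoryStore) LookupAnonymous() bool { return m.identities["anonymous"] }

func TestEmptyConfigWithFallbackKey(t *testing.T) {
	shared.Reset() // clear identities left behind by earlier tests
	if shared.LookupAnonymous() {
		t.Fatal("expected no anonymous identity in a fresh store")
	}
}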

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
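
A runnable sketch of the pattern; the types are assumed, not taken from
the plugin package, and this variant deliberately never closes the
outgoing channel at all (senders detect shutdown only via done), which
removes the send-on-closed window entirely. The commit instead closes
done strictly before outgoing.

package main

import (
	"fmt"
	"sync"
)

type message struct{ payload string }

// streamSession mirrors the shape described above: a data channel plus a
// done channel that senders watch for shutdown.
type streamSession struct {
	outgoing chan *message
	done     chan struct{}
}

func newStreamSession() *streamSession {
	return &streamSession{
		outgoing: make(chan *message),
		done:     make(chan struct{}),
	}
}

// close signals shutdown; outgoing is left for the GC rather than closed.
func (s *streamSession) close() {
	close(s.done)
}

// sendToWorker reports whether the message was delivered, returning false
// instead of panicking once the session has shut down.
func (s *streamSession) sendToWorker(m *message) bool {
	select {
	case <-s.done:
		return false
	case s.outgoing <- m:
		return true
	}
}

func main() {
	s := newStreamSession()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // worker that disconnects after one message
		defer wg.Done()
		<-s.outgoing
		s.close()
	}()
	fmt.Println(s.sendToWorker(&message{"a"})) // true: delivered
	wg.Wait()
	fmt.Println(s.sendToWorker(&message{"b"})) // false: session closed
}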
2026-04-03 16:04:27 -07:00


package operation

import (
	"context"
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/util"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// VolumeAssignRequest describes one file-id assignment request sent to the master.
type VolumeAssignRequest struct {
	Count               uint64
	Replication         string
	Collection          string
	Ttl                 string
	DiskType            string
	DataCenter          string
	Rack                string
	DataNode            string
	WritableVolumeCount uint32
}

// AssignResult carries the file id, upload location, and auth token returned by the master.
type AssignResult struct {
	Fid       string              `json:"fid,omitempty"`
	Url       string              `json:"url,omitempty"`
	PublicUrl string              `json:"publicUrl,omitempty"`
	GrpcPort  int                 `json:"grpcPort,omitempty"`
	Count     uint64              `json:"count,omitempty"`
	Error     string              `json:"error,omitempty"`
	Auth      security.EncodedJwt `json:"auth,omitempty"`
	Replicas  []Location          `json:"replicas,omitempty"`
}

// Assign obtains file ids from the master, trying the primary request first
// and then any alternatives, all within one shared 30-second retry budget.
func Assign(ctx context.Context, masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {

	var requests []*VolumeAssignRequest
	requests = append(requests, primaryRequest)
	requests = append(requests, alternativeRequests...)

	var lastError error
	ret := &AssignResult{}

	// Compute a single deadline so all request entries (primary + fallback)
	// share one 30s retry budget instead of each getting its own.
	// Use a deadline-aware context so both RetryWithBackoff and per-attempt
	// timeouts are bounded by the shared budget.
	deadline := time.Now().Add(30 * time.Second)
	deadlineCtx, deadlineCancel := context.WithDeadline(ctx, deadline)
	defer deadlineCancel()

	for i, request := range requests {
		if request == nil {
			continue
		}

		remaining := time.Until(deadline)
		if remaining <= 0 {
			break
		}

		lastError = util.RetryWithBackoff(deadlineCtx, "assign", remaining,
			func(err error) bool {
				st, ok := status.FromError(err)
				return ok && st.Code() == codes.Unavailable
			},
			func() error {
				// Per-attempt timeout to prevent a single slow RPC from consuming the entire retry budget
				attemptCtx, attemptCancel := context.WithTimeout(deadlineCtx, 10*time.Second)
				defer attemptCancel()

				return WithMasterServerClient(false, masterFn(attemptCtx), grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
					req := &master_pb.AssignRequest{
						Count:               request.Count,
						Replication:         request.Replication,
						Collection:          request.Collection,
						Ttl:                 request.Ttl,
						DiskType:            request.DiskType,
						DataCenter:          request.DataCenter,
						Rack:                request.Rack,
						DataNode:            request.DataNode,
						WritableVolumeCount: request.WritableVolumeCount,
					}
					resp, grpcErr := masterClient.Assign(attemptCtx, req)
					if grpcErr != nil {
						return grpcErr
					}
					if resp.Error != "" {
						return fmt.Errorf("assignRequest: %v", resp.Error)
					}

					ret.Count = resp.Count
					ret.Fid = resp.Fid
					ret.Url = resp.Location.Url
					ret.PublicUrl = resp.Location.PublicUrl
					ret.GrpcPort = int(resp.Location.GrpcPort)
					ret.Error = resp.Error
					ret.Auth = security.EncodedJwt(resp.Auth)
					ret.Replicas = nil
					for _, r := range resp.Replicas {
						ret.Replicas = append(ret.Replicas, Location{
							Url:        r.Url,
							PublicUrl:  r.PublicUrl,
							DataCenter: r.DataCenter,
						})
					}
					return nil
				})
			})

		if lastError != nil {
			stats.FilerHandlerCounter.WithLabelValues(stats.ErrorChunkAssign).Inc()
			continue
		}
		if ret.Count <= 0 {
			lastError = fmt.Errorf("assign failure %d: %v", i+1, ret.Error)
			continue
		}
		break
	}

	return ret, lastError
}

// LookupJwt asks the master for the JWT that authorizes access to the given file id.
func LookupJwt(master pb.ServerAddress, grpcDialOption grpc.DialOption, fileId string) (token security.EncodedJwt) {

	WithMasterServerClient(false, master, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
		resp, grpcErr := masterClient.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{
			VolumeOrFileIds: []string{fileId},
		})
		if grpcErr != nil {
			return grpcErr
		}
		if len(resp.VolumeIdLocations) == 0 {
			return nil
		}
		token = security.EncodedJwt(resp.VolumeIdLocations[0].Auth)
		return nil
	})

	return
}

// StorageOption collects the placement, TTL, and growth settings used when assigning file ids.
type StorageOption struct {
	Replication       string
	DiskType          string
	Collection        string
	DataCenter        string
	Rack              string
	DataNode          string
	TtlSeconds        int32
	VolumeGrowthCount uint32
	MaxFileNameLength uint32
	Fsync             bool
	SaveInside        bool
}

// TtlString renders TtlSeconds in the master's TTL string format.
func (so *StorageOption) TtlString() string {
	return needle.SecondsToTTL(so.TtlSeconds)
}

// ToAssignRequests builds the primary assign request and, when any placement
// constraint (data center, rack, or data node) is set, an unconstrained
// fallback request to try if the constrained one fails.
func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, altRequest *VolumeAssignRequest) {
	ar = &VolumeAssignRequest{
		Count:               uint64(count),
		Replication:         so.Replication,
		Collection:          so.Collection,
		Ttl:                 so.TtlString(),
		DiskType:            so.DiskType,
		DataCenter:          so.DataCenter,
		Rack:                so.Rack,
		DataNode:            so.DataNode,
		WritableVolumeCount: so.VolumeGrowthCount,
	}
	if so.DataCenter != "" || so.Rack != "" || so.DataNode != "" {
		altRequest = &VolumeAssignRequest{
			Count:               uint64(count),
			Replication:         so.Replication,
			Collection:          so.Collection,
			Ttl:                 so.TtlString(),
			DiskType:            so.DiskType,
			DataCenter:          "",
			Rack:                "",
			DataNode:            "",
			WritableVolumeCount: so.VolumeGrowthCount,
		}
	}
	return
}
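
For reference, a usage sketch of ToAssignRequests feeding Assign. The
master address, storage-option values, and insecure dial option are
placeholders, and the func literal assumes GetMasterFn has the signature
func(context.Context) pb.ServerAddress, as the masterFn(attemptCtx) call
in Assign suggests:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	so := &operation.StorageOption{
		Replication: "001",
		Collection:  "pictures",
		DataCenter:  "dc1", // constraint; also yields an unconstrained fallback
		TtlSeconds:  3600,
	}
	primary, alt := so.ToAssignRequests(1)

	masterFn := func(ctx context.Context) pb.ServerAddress {
		return pb.ServerAddress("localhost:9333") // placeholder master
	}

	result, err := operation.Assign(context.Background(), masterFn,
		grpc.WithTransportCredentials(insecure.NewCredentials()), primary, alt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.Fid, result.Url)
}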