chore: remove ~50k lines of dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
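
A self-contained sketch of the singleton-reset pattern (the memoryStore
type and its methods below are illustrative stand-ins, not the actual
SeaweedFS credential API):

    package credential_test

    import (
        "sync"
        "testing"
    )

    // memoryStore mimics the shared credential backend: one instance
    // per process, registered via init(), reused by every test.
    type memoryStore struct {
        mu         sync.Mutex
        identities map[string]struct{}
    }

    var sharedStore = &memoryStore{identities: map[string]struct{}{}}

    // Reset clears all identities, returning the store to a pristine state.
    func (s *memoryStore) Reset() {
        s.mu.Lock()
        defer s.mu.Unlock()
        s.identities = map[string]struct{}{}
    }

    // LookupAnonymous reports whether an anonymous identity exists.
    func (s *memoryStore) LookupAnonymous() bool {
        s.mu.Lock()
        defer s.mu.Unlock()
        _, ok := s.identities["anonymous"]
        return ok
    }

    func TestEmptyConfigWithFallbackKey(t *testing.T) {
        sharedStore.Reset() // discard state leaked by earlier tests
        if sharedStore.LookupAnonymous() {
            t.Fatal("expected no anonymous identity in a fresh store")
        }
    }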

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
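
A minimal sketch of the done-channel pattern described above (types and
names simplified from the commit; this sketch never closes the outgoing
channel at all, which is the simplest race-free variant):

    package worker

    import "sync"

    type message struct{ payload []byte } // stand-in for the real protocol type

    type streamSession struct {
        outgoing  chan *message
        done      chan struct{}
        closeOnce sync.Once
    }

    // close signals senders via done before the outgoing channel is
    // touched, so no sender can race a close of outgoing.
    func (s *streamSession) close() {
        s.closeOnce.Do(func() {
            close(s.done)
            // The actual fix closes s.outgoing after this point; the
            // sketch leaves it open and lets the reader exit via done.
        })
    }

    // sendToWorker reports whether the message was delivered. Selecting
    // on done alongside the send detects a closed session without
    // panicking on a send to a closed channel.
    func (s *streamSession) sendToWorker(msg *message) bool {
        select {
        case <-s.done:
            return false // session closed; drop the message
        case s.outgoing <- msg:
            return true
        }
    }
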
Author:    Chris Lu
Committer: GitHub
Date:      2026-04-03 16:04:27 -07:00
Parent:    8fad85aed7
Commit:    995dfc4d5d

264 changed files with 62 additions and 46027 deletions


@@ -1,8 +1,6 @@
package shell

import (
    "fmt"

    "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
    "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding/distribution"
    "github.com/seaweedfs/seaweedfs/weed/storage/needle"

@@ -13,18 +11,6 @@ import (
// ECDistribution is an alias to the distribution package type for backward compatibility
type ECDistribution = distribution.ECDistribution

// CalculateECDistribution computes the target EC shard distribution based on replication policy.
// This is a convenience wrapper that uses the default 10+4 EC configuration.
// For custom EC ratios, use the distribution package directly.
func CalculateECDistribution(totalShards, parityShards int, rp *super_block.ReplicaPlacement) *ECDistribution {
    ec := distribution.ECConfig{
        DataShards:   totalShards - parityShards,
        ParityShards: parityShards,
    }
    rep := distribution.NewReplicationConfig(rp)
    return distribution.CalculateDistribution(ec, rep)
}

// TopologyDistributionAnalysis holds the current shard distribution analysis
// This wraps the distribution package's TopologyAnalysis with shell-specific EcNode handling
type TopologyDistributionAnalysis struct {
@@ -34,99 +20,6 @@ type TopologyDistributionAnalysis struct {
    nodeMap map[string]*EcNode // nodeID -> EcNode
}

// NewTopologyDistributionAnalysis creates a new analysis structure
func NewTopologyDistributionAnalysis() *TopologyDistributionAnalysis {
    return &TopologyDistributionAnalysis{
        inner:   distribution.NewTopologyAnalysis(),
        nodeMap: make(map[string]*EcNode),
    }
}

// AddNode adds a node and its shards to the analysis
func (a *TopologyDistributionAnalysis) AddNode(node *EcNode, shardsInfo *erasure_coding.ShardsInfo) {
    nodeId := node.info.Id

    // Create distribution.TopologyNode from EcNode
    topoNode := &distribution.TopologyNode{
        NodeID:      nodeId,
        DataCenter:  string(node.dc),
        Rack:        string(node.rack),
        FreeSlots:   node.freeEcSlot,
        TotalShards: shardsInfo.Count(),
        ShardIDs:    shardsInfo.IdsInt(),
    }
    a.inner.AddNode(topoNode)
    a.nodeMap[nodeId] = node

    // Add shard locations
    for _, shardId := range shardsInfo.Ids() {
        a.inner.AddShardLocation(distribution.ShardLocation{
            ShardID:    int(shardId),
            NodeID:     nodeId,
            DataCenter: string(node.dc),
            Rack:       string(node.rack),
        })
    }
}

// Finalize completes the analysis
func (a *TopologyDistributionAnalysis) Finalize() {
    a.inner.Finalize()
}

// String returns a summary
func (a *TopologyDistributionAnalysis) String() string {
    return a.inner.String()
}

// DetailedString returns detailed analysis
func (a *TopologyDistributionAnalysis) DetailedString() string {
    return a.inner.DetailedString()
}

// GetShardsByDC returns shard counts by DC
func (a *TopologyDistributionAnalysis) GetShardsByDC() map[DataCenterId]int {
    result := make(map[DataCenterId]int)
    for dc, count := range a.inner.ShardsByDC {
        result[DataCenterId(dc)] = count
    }
    return result
}

// GetShardsByRack returns shard counts by rack
func (a *TopologyDistributionAnalysis) GetShardsByRack() map[RackId]int {
    result := make(map[RackId]int)
    for rack, count := range a.inner.ShardsByRack {
        result[RackId(rack)] = count
    }
    return result
}

// GetShardsByNode returns shard counts by node
func (a *TopologyDistributionAnalysis) GetShardsByNode() map[EcNodeId]int {
    result := make(map[EcNodeId]int)
    for nodeId, count := range a.inner.ShardsByNode {
        result[EcNodeId(nodeId)] = count
    }
    return result
}

// AnalyzeVolumeDistribution creates an analysis of current shard distribution for a volume
func AnalyzeVolumeDistribution(volumeId needle.VolumeId, locations []*EcNode, diskType types.DiskType) *TopologyDistributionAnalysis {
    analysis := NewTopologyDistributionAnalysis()
    for _, node := range locations {
        si := findEcVolumeShardsInfo(node, volumeId, diskType)
        if si.Count() > 0 {
            analysis.AddNode(node, si)
        }
    }
    analysis.Finalize()
    return analysis
}

// ECShardMove represents a planned shard move (shell-specific with EcNode references)
type ECShardMove struct {
    VolumeId needle.VolumeId
@@ -136,12 +29,6 @@ type ECShardMove struct {
    Reason string
}

// String returns a human-readable description
func (m ECShardMove) String() string {
    return fmt.Sprintf("volume %d shard %d: %s -> %s (%s)",
        m.VolumeId, m.ShardId, m.SourceNode.info.Id, m.DestNode.info.Id, m.Reason)
}

// ProportionalECRebalancer implements proportional shard distribution for shell commands
type ProportionalECRebalancer struct {
    ecNodes []*EcNode
@@ -149,133 +36,3 @@ type ProportionalECRebalancer struct {
    diskType types.DiskType
    ecConfig distribution.ECConfig
}

// NewProportionalECRebalancer creates a new proportional rebalancer with default EC config
func NewProportionalECRebalancer(
    ecNodes []*EcNode,
    rp *super_block.ReplicaPlacement,
    diskType types.DiskType,
) *ProportionalECRebalancer {
    return NewProportionalECRebalancerWithConfig(
        ecNodes,
        rp,
        diskType,
        distribution.DefaultECConfig(),
    )
}

// NewProportionalECRebalancerWithConfig creates a rebalancer with custom EC configuration
func NewProportionalECRebalancerWithConfig(
    ecNodes []*EcNode,
    rp *super_block.ReplicaPlacement,
    diskType types.DiskType,
    ecConfig distribution.ECConfig,
) *ProportionalECRebalancer {
    return &ProportionalECRebalancer{
        ecNodes:          ecNodes,
        replicaPlacement: rp,
        diskType:         diskType,
        ecConfig:         ecConfig,
    }
}

// PlanMoves generates a plan for moving shards to achieve proportional distribution
func (r *ProportionalECRebalancer) PlanMoves(
    volumeId needle.VolumeId,
    locations []*EcNode,
) ([]ECShardMove, error) {
    // Build topology analysis
    analysis := distribution.NewTopologyAnalysis()
    nodeMap := make(map[string]*EcNode)

    // Add all EC nodes to the analysis (even those without shards)
    for _, node := range r.ecNodes {
        nodeId := node.info.Id
        topoNode := &distribution.TopologyNode{
            NodeID:     nodeId,
            DataCenter: string(node.dc),
            Rack:       string(node.rack),
            FreeSlots:  node.freeEcSlot,
        }
        analysis.AddNode(topoNode)
        nodeMap[nodeId] = node
    }

    // Add shard locations from nodes that have shards
    for _, node := range locations {
        nodeId := node.info.Id
        si := findEcVolumeShardsInfo(node, volumeId, r.diskType)
        for _, shardId := range si.Ids() {
            analysis.AddShardLocation(distribution.ShardLocation{
                ShardID:    int(shardId),
                NodeID:     nodeId,
                DataCenter: string(node.dc),
                Rack:       string(node.rack),
            })
        }
        if _, exists := nodeMap[nodeId]; !exists {
            nodeMap[nodeId] = node
        }
    }
    analysis.Finalize()

    // Create rebalancer and plan moves
    rep := distribution.NewReplicationConfig(r.replicaPlacement)
    rebalancer := distribution.NewRebalancer(r.ecConfig, rep)
    plan, err := rebalancer.PlanRebalance(analysis)
    if err != nil {
        return nil, err
    }

    // Convert distribution moves to shell moves
    var moves []ECShardMove
    for _, move := range plan.Moves {
        srcNode := nodeMap[move.SourceNode.NodeID]
        destNode := nodeMap[move.DestNode.NodeID]
        if srcNode == nil || destNode == nil {
            continue
        }
        moves = append(moves, ECShardMove{
            VolumeId:   volumeId,
            ShardId:    erasure_coding.ShardId(move.ShardID),
            SourceNode: srcNode,
            DestNode:   destNode,
            Reason:     move.Reason,
        })
    }
    return moves, nil
}

// GetDistributionSummary returns a summary of the planned distribution
func GetDistributionSummary(rp *super_block.ReplicaPlacement) string {
    ec := distribution.DefaultECConfig()
    rep := distribution.NewReplicationConfig(rp)
    dist := distribution.CalculateDistribution(ec, rep)
    return dist.Summary()
}

// GetDistributionSummaryWithConfig returns a summary with custom EC configuration
func GetDistributionSummaryWithConfig(rp *super_block.ReplicaPlacement, ecConfig distribution.ECConfig) string {
    rep := distribution.NewReplicationConfig(rp)
    dist := distribution.CalculateDistribution(ecConfig, rep)
    return dist.Summary()
}

// GetFaultToleranceAnalysis returns fault tolerance analysis for the given configuration
func GetFaultToleranceAnalysis(rp *super_block.ReplicaPlacement) string {
    ec := distribution.DefaultECConfig()
    rep := distribution.NewReplicationConfig(rp)
    dist := distribution.CalculateDistribution(ec, rep)
    return dist.FaultToleranceAnalysis()
}

// GetFaultToleranceAnalysisWithConfig returns fault tolerance analysis with custom EC configuration
func GetFaultToleranceAnalysisWithConfig(rp *super_block.ReplicaPlacement, ecConfig distribution.ECConfig) string {
    rep := distribution.NewReplicationConfig(rp)
    dist := distribution.CalculateDistribution(ecConfig, rep)
    return dist.FaultToleranceAnalysis()
}