chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
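
For context, a minimal sketch of the reset pattern (the store accessor
name below is hypothetical; MemoryStore, Reset(), and LookupAnonymous()
are the identifiers from the description above):

    func TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey(t *testing.T) {
        // The MemoryStore backend is registered once via init(), so
        // identities created by earlier tests survive into this one.
        store := sharedMemoryStore() // hypothetical accessor for the singleton
        store.Reset()                // clear shared state before the test runs

        // ... load the IAM manager from an empty config and assert that
        // LookupAnonymous() no longer reports a leftover anonymous identity ...
    }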

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
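
A compact sketch of that pattern (names follow the description above; the
message type and error text are placeholders, and the sync/errors imports
are omitted):

    type streamSession struct {
        outgoing  chan message  // placeholder for the real worker message type
        done      chan struct{} // closed before outgoing on shutdown
        closeOnce sync.Once
    }

    func (s *streamSession) close() {
        s.closeOnce.Do(func() {
            close(s.done)     // let senders observe shutdown first
            close(s.outgoing) // then release the receiver
        })
    }

    func (s *streamSession) sendToWorker(m message) error {
        select {
        case <-s.done:
            // Session is shutting down: fail the send instead of
            // panicking on a write to the closed outgoing channel.
            return errors.New("worker session closed")
        case s.outgoing <- m:
            return nil
        }
    }

Closing done before outgoing gives a sender blocked in the select a ready
case it can take safely, so the error path runs instead of the panic.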
Author: Chris Lu (committed by GitHub)
Date: 2026-04-03 16:04:27 -07:00
Parent: 8fad85aed7
Commit: 995dfc4d5d

264 changed files with 62 additions and 46027 deletions


@@ -1,12 +1,6 @@
 // Package distribution provides EC shard distribution algorithms with configurable EC ratios.
 package distribution
 
-import (
-	"fmt"
-
-	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
-)
-
 // ECConfig holds erasure coding configuration parameters.
 // This replaces hard-coded constants like DataShardsCount=10, ParityShardsCount=4.
 type ECConfig struct {
@@ -14,113 +8,6 @@ type ECConfig struct {
 	ParityShards int // Number of parity shards (e.g., 4)
 }
 
-// DefaultECConfig returns the standard 10+4 EC configuration
-func DefaultECConfig() ECConfig {
-	return ECConfig{
-		DataShards:   10,
-		ParityShards: 4,
-	}
-}
-
-// NewECConfig creates a new EC configuration with validation
-func NewECConfig(dataShards, parityShards int) (ECConfig, error) {
-	if dataShards <= 0 {
-		return ECConfig{}, fmt.Errorf("dataShards must be positive, got %d", dataShards)
-	}
-	if parityShards <= 0 {
-		return ECConfig{}, fmt.Errorf("parityShards must be positive, got %d", parityShards)
-	}
-	if dataShards+parityShards > 32 {
-		return ECConfig{}, fmt.Errorf("total shards (%d+%d=%d) exceeds maximum of 32",
-			dataShards, parityShards, dataShards+parityShards)
-	}
-	return ECConfig{
-		DataShards:   dataShards,
-		ParityShards: parityShards,
-	}, nil
-}
-
-// TotalShards returns the total number of shards (data + parity)
-func (c ECConfig) TotalShards() int {
-	return c.DataShards + c.ParityShards
-}
-
-// MaxTolerableLoss returns the maximum number of shards that can be lost
-// while still being able to reconstruct the data
-func (c ECConfig) MaxTolerableLoss() int {
-	return c.ParityShards
-}
-
-// MinShardsForReconstruction returns the minimum number of shards needed
-// to reconstruct the original data
-func (c ECConfig) MinShardsForReconstruction() int {
-	return c.DataShards
-}
-
-// String returns a human-readable representation
-func (c ECConfig) String() string {
-	return fmt.Sprintf("%d+%d (total: %d, can lose: %d)",
-		c.DataShards, c.ParityShards, c.TotalShards(), c.MaxTolerableLoss())
-}
-
-// IsDataShard returns true if the shard ID is a data shard (0 to DataShards-1)
-func (c ECConfig) IsDataShard(shardID int) bool {
-	return shardID >= 0 && shardID < c.DataShards
-}
-
-// IsParityShard returns true if the shard ID is a parity shard (DataShards to TotalShards-1)
-func (c ECConfig) IsParityShard(shardID int) bool {
-	return shardID >= c.DataShards && shardID < c.TotalShards()
-}
-
-// SortShardsDataFirst returns a copy of shards sorted with data shards first.
-// This is useful for initial placement where data shards should be spread out first.
-func (c ECConfig) SortShardsDataFirst(shards []int) []int {
-	result := make([]int, len(shards))
-	copy(result, shards)
-
-	// Partition: data shards first, then parity shards
-	dataIdx := 0
-	parityIdx := len(result) - 1
-	sorted := make([]int, len(result))
-
-	for _, s := range result {
-		if c.IsDataShard(s) {
-			sorted[dataIdx] = s
-			dataIdx++
-		} else {
-			sorted[parityIdx] = s
-			parityIdx--
-		}
-	}
-
-	return sorted
-}
-
-// SortShardsParityFirst returns a copy of shards sorted with parity shards first.
-// This is useful for rebalancing where we prefer to move parity shards.
-func (c ECConfig) SortShardsParityFirst(shards []int) []int {
-	result := make([]int, len(shards))
-	copy(result, shards)
-
-	// Partition: parity shards first, then data shards
-	parityIdx := 0
-	dataIdx := len(result) - 1
-	sorted := make([]int, len(result))
-
-	for _, s := range result {
-		if c.IsParityShard(s) {
-			sorted[parityIdx] = s
-			parityIdx++
-		} else {
-			sorted[dataIdx] = s
-			dataIdx--
-		}
-	}
-
-	return sorted
-}
-
 // ReplicationConfig holds the parsed replication policy
 type ReplicationConfig struct {
 	MinDataCenters int // X+1 from XYZ replication (minimum DCs to use)
@@ -130,42 +17,3 @@ type ReplicationConfig struct {
 	// Original replication string (for logging/debugging)
 	Original string
 }
-
-// NewReplicationConfig creates a ReplicationConfig from a ReplicaPlacement
-func NewReplicationConfig(rp *super_block.ReplicaPlacement) ReplicationConfig {
-	if rp == nil {
-		return ReplicationConfig{
-			MinDataCenters:  1,
-			MinRacksPerDC:   1,
-			MinNodesPerRack: 1,
-			Original:        "000",
-		}
-	}
-	return ReplicationConfig{
-		MinDataCenters:  rp.DiffDataCenterCount + 1,
-		MinRacksPerDC:   rp.DiffRackCount + 1,
-		MinNodesPerRack: rp.SameRackCount + 1,
-		Original:        rp.String(),
-	}
-}
-
-// NewReplicationConfigFromString creates a ReplicationConfig from a replication string
-func NewReplicationConfigFromString(replication string) (ReplicationConfig, error) {
-	rp, err := super_block.NewReplicaPlacementFromString(replication)
-	if err != nil {
-		return ReplicationConfig{}, err
-	}
-	return NewReplicationConfig(rp), nil
-}
-
-// TotalPlacementSlots returns the minimum number of unique placement locations
-// based on the replication policy
-func (r ReplicationConfig) TotalPlacementSlots() int {
-	return r.MinDataCenters * r.MinRacksPerDC * r.MinNodesPerRack
-}
-
-// String returns a human-readable representation
-func (r ReplicationConfig) String() string {
-	return fmt.Sprintf("replication=%s (DCs:%d, Racks/DC:%d, Nodes/Rack:%d)",
-		r.Original, r.MinDataCenters, r.MinRacksPerDC, r.MinNodesPerRack)
-}