chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

  Remove ~50,000 lines of unreachable code identified by static analysis.

  Major removals:
  - weed/filer/redis_lua: entire unused Redis Lua filer store implementation
  - weed/wdclient/net2, resource_pool: unused connection/resource pool packages
  - weed/plugin/worker/lifecycle: unused lifecycle plugin worker
  - weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy, multipart IAM, key rotation, and various SSE helper functions
  - weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
  - weed/mq/offset: unused SQL storage and migration code
  - weed/worker: unused registry, task, and monitoring functions
  - weed/query: unused SQL engine, parquet scanner, and type functions
  - weed/shell: unused EC proportional rebalance functions
  - weed/storage/erasure_coding/distribution: unused distribution analysis functions
  - Individual unreachable functions removed from 150+ files across admin, credential, filer, iam, kms, mount, mq, operation, pb, s3api, server, shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

  TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because the MemoryStore credential backend is a singleton registered via init(). Earlier tests that create anonymous identities pollute the shared store, causing LookupAnonymous() to unexpectedly return true. Fix by calling Reset() on the memory store before the test runs.

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

  The Plugin.sendToWorker method could panic with "send on closed channel" when a worker disconnected while a message was being sent. The race was between streamSession.close() closing the outgoing channel and sendToWorker writing to it concurrently. Add a done channel to streamSession that is closed before the outgoing channel, and check it in sendToWorker's select to safely detect closed sessions without panicking.
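For context on the fix(plugin) commit above, a minimal, self-contained sketch of the done-channel idea it describes: the session carries a done channel that is closed on shutdown, and sendToWorker selects on it so a send to a dead session returns an error instead of panicking or blocking. The streamSession shape, field names, and message type below are illustrative assumptions, not the actual SeaweedFS plugin types; to keep the sketch unambiguously race-free it never closes outgoing, whereas the real change closes it after done.

package main

import (
    "errors"
    "fmt"
)

// streamSession is a hypothetical stand-in for the plugin worker session.
type streamSession struct {
    outgoing chan string   // messages destined for the worker
    done     chan struct{} // closed when the worker disconnects
}

func newStreamSession() *streamSession {
    return &streamSession{
        outgoing: make(chan string), // unbuffered: a send needs a live receiver
        done:     make(chan struct{}),
    }
}

// close signals shutdown; senders that observe done stop using outgoing.
func (s *streamSession) close() {
    close(s.done)
}

// sendToWorker fails fast once the session is closed instead of panicking
// on a closed channel or blocking on a channel nobody drains anymore.
func (s *streamSession) sendToWorker(msg string) error {
    select {
    case <-s.done:
        return errors.New("worker session closed")
    case s.outgoing <- msg:
        return nil
    }
}

func main() {
    s := newStreamSession()
    s.close()
    // With the session closed and no receiver on outgoing, the select
    // deterministically takes the done case.
    fmt.Println(s.sendToWorker("task-1")) // worker session closed
}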
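Similarly, the fix(s3) commit above uses a standard pattern for package-level singletons that leak state between tests in the same binary: reset the shared object at the top of the test that needs a clean slate. A rough sketch of that pattern with hypothetical types; the real change calls Reset() on the shared MemoryStore credential backend, whose actual interface is not reproduced here.

package credential_test

import "testing"

// memoryStore is a hypothetical stand-in for a credential backend that is
// registered once via init() and therefore shared by every test in the binary.
type memoryStore struct {
    anonymous bool
}

func (m *memoryStore) Reset()                { *m = memoryStore{} }
func (m *memoryStore) AddAnonymousIdentity() { m.anonymous = true }
func (m *memoryStore) LookupAnonymous() bool { return m.anonymous }

// sharedStore mimics the singleton the package's init() would register.
var sharedStore = &memoryStore{}

func TestEmptyConfigWithFallbackKey(t *testing.T) {
    // An earlier test may have added an anonymous identity to the shared
    // store; reset it so this test observes the empty-config behavior.
    sharedStore.Reset()

    if sharedStore.LookupAnonymous() {
        t.Fatal("expected no anonymous identity after reset")
    }
}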
@@ -1,10 +1,5 @@
package distribution

import (
    "fmt"
    "slices"
)

// ShardLocation represents where a shard is located in the topology
type ShardLocation struct {
    ShardID int
@@ -47,101 +42,6 @@ type TopologyAnalysis struct {
    TotalDCs int
}

// NewTopologyAnalysis creates a new empty analysis
func NewTopologyAnalysis() *TopologyAnalysis {
    return &TopologyAnalysis{
        ShardsByDC: make(map[string]int),
        ShardsByRack: make(map[string]int),
        ShardsByNode: make(map[string]int),
        DCToShards: make(map[string][]int),
        RackToShards: make(map[string][]int),
        NodeToShards: make(map[string][]int),
        DCToRacks: make(map[string][]string),
        RackToNodes: make(map[string][]*TopologyNode),
        AllNodes: make(map[string]*TopologyNode),
    }
}

// AddShardLocation adds a shard location to the analysis
func (a *TopologyAnalysis) AddShardLocation(loc ShardLocation) {
    // Update counts
    a.ShardsByDC[loc.DataCenter]++
    a.ShardsByRack[loc.Rack]++
    a.ShardsByNode[loc.NodeID]++

    // Update shard lists
    a.DCToShards[loc.DataCenter] = append(a.DCToShards[loc.DataCenter], loc.ShardID)
    a.RackToShards[loc.Rack] = append(a.RackToShards[loc.Rack], loc.ShardID)
    a.NodeToShards[loc.NodeID] = append(a.NodeToShards[loc.NodeID], loc.ShardID)

    a.TotalShards++
}

// AddNode adds a node to the topology (even if it has no shards)
func (a *TopologyAnalysis) AddNode(node *TopologyNode) {
    if _, exists := a.AllNodes[node.NodeID]; exists {
        return // Already added
    }

    a.AllNodes[node.NodeID] = node
    a.TotalNodes++

    // Update topology structure
    if !slices.Contains(a.DCToRacks[node.DataCenter], node.Rack) {
        a.DCToRacks[node.DataCenter] = append(a.DCToRacks[node.DataCenter], node.Rack)
    }
    a.RackToNodes[node.Rack] = append(a.RackToNodes[node.Rack], node)

    // Update counts
    if _, exists := a.ShardsByDC[node.DataCenter]; !exists {
        a.TotalDCs++
    }
    if _, exists := a.ShardsByRack[node.Rack]; !exists {
        a.TotalRacks++
    }
}

// Finalize computes final statistics after all data is added
func (a *TopologyAnalysis) Finalize() {
    // Ensure we have accurate DC and rack counts
    dcSet := make(map[string]bool)
    rackSet := make(map[string]bool)
    for _, node := range a.AllNodes {
        dcSet[node.DataCenter] = true
        rackSet[node.Rack] = true
    }
    a.TotalDCs = len(dcSet)
    a.TotalRacks = len(rackSet)
    a.TotalNodes = len(a.AllNodes)
}

// String returns a summary of the analysis
func (a *TopologyAnalysis) String() string {
    return fmt.Sprintf("TopologyAnalysis{shards:%d, nodes:%d, racks:%d, dcs:%d}",
        a.TotalShards, a.TotalNodes, a.TotalRacks, a.TotalDCs)
}

// DetailedString returns a detailed multi-line summary
func (a *TopologyAnalysis) DetailedString() string {
    s := fmt.Sprintf("Topology Analysis:\n")
    s += fmt.Sprintf(" Total Shards: %d\n", a.TotalShards)
    s += fmt.Sprintf(" Data Centers: %d\n", a.TotalDCs)
    for dc, count := range a.ShardsByDC {
        s += fmt.Sprintf(" %s: %d shards\n", dc, count)
    }
    s += fmt.Sprintf(" Racks: %d\n", a.TotalRacks)
    for rack, count := range a.ShardsByRack {
        s += fmt.Sprintf(" %s: %d shards\n", rack, count)
    }
    s += fmt.Sprintf(" Nodes: %d\n", a.TotalNodes)
    for nodeID, count := range a.ShardsByNode {
        if count > 0 {
            s += fmt.Sprintf(" %s: %d shards\n", nodeID, count)
        }
    }
    return s
}

// TopologyExcess represents a topology level (DC/rack/node) with excess shards
type TopologyExcess struct {
    ID string // DC/rack/node ID
@@ -150,91 +50,3 @@ type TopologyExcess struct {
    Shards []int // Shard IDs at this level
    Nodes []*TopologyNode // Nodes at this level (for finding sources)
}

// CalculateDCExcess returns DCs with more shards than the target
func CalculateDCExcess(analysis *TopologyAnalysis, dist *ECDistribution) []TopologyExcess {
    var excess []TopologyExcess

    for dc, count := range analysis.ShardsByDC {
        if count > dist.TargetShardsPerDC {
            // Collect nodes in this DC
            var nodes []*TopologyNode
            for _, rack := range analysis.DCToRacks[dc] {
                nodes = append(nodes, analysis.RackToNodes[rack]...)
            }
            excess = append(excess, TopologyExcess{
                ID: dc,
                Level: "dc",
                Excess: count - dist.TargetShardsPerDC,
                Shards: analysis.DCToShards[dc],
                Nodes: nodes,
            })
        }
    }

    // Sort by excess (most excess first)
    slices.SortFunc(excess, func(a, b TopologyExcess) int {
        return b.Excess - a.Excess
    })

    return excess
}

// CalculateRackExcess returns racks with more shards than the target (within a DC)
func CalculateRackExcess(analysis *TopologyAnalysis, dc string, targetPerRack int) []TopologyExcess {
    var excess []TopologyExcess

    for _, rack := range analysis.DCToRacks[dc] {
        count := analysis.ShardsByRack[rack]
        if count > targetPerRack {
            excess = append(excess, TopologyExcess{
                ID: rack,
                Level: "rack",
                Excess: count - targetPerRack,
                Shards: analysis.RackToShards[rack],
                Nodes: analysis.RackToNodes[rack],
            })
        }
    }

    slices.SortFunc(excess, func(a, b TopologyExcess) int {
        return b.Excess - a.Excess
    })

    return excess
}

// CalculateUnderservedDCs returns DCs that have fewer shards than target
func CalculateUnderservedDCs(analysis *TopologyAnalysis, dist *ECDistribution) []string {
    var underserved []string

    // Check existing DCs
    for dc, count := range analysis.ShardsByDC {
        if count < dist.TargetShardsPerDC {
            underserved = append(underserved, dc)
        }
    }

    // Check DCs with nodes but no shards
    for dc := range analysis.DCToRacks {
        if _, exists := analysis.ShardsByDC[dc]; !exists {
            underserved = append(underserved, dc)
        }
    }

    return underserved
}

// CalculateUnderservedRacks returns racks that have fewer shards than target
func CalculateUnderservedRacks(analysis *TopologyAnalysis, dc string, targetPerRack int) []string {
    var underserved []string

    for _, rack := range analysis.DCToRacks[dc] {
        count := analysis.ShardsByRack[rack]
        if count < targetPerRack {
            underserved = append(underserved, rack)
        }
    }

    return underserved
}
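As context for the diff above, a rough sketch of how the removed analysis helpers composed before this change: accumulate shard locations and nodes into a TopologyAnalysis, finalize it, then query for over- and under-served data centers. It assumes the pre-removal package; the ECDistribution literal and the TopologyNode and ShardLocation field names are inferred from the code shown above and may omit other fields.

package distribution

import "fmt"

// reportDCImbalance is a hypothetical example, not part of the original
// package, illustrating how the removed helpers were meant to be combined.
func reportDCImbalance() {
    analysis := NewTopologyAnalysis()

    // Register the nodes we know about, even those holding no shards yet.
    analysis.AddNode(&TopologyNode{NodeID: "volume-1", DataCenter: "dc1", Rack: "rack-a"})
    analysis.AddNode(&TopologyNode{NodeID: "volume-2", DataCenter: "dc2", Rack: "rack-b"})

    // Record where every erasure-coded shard currently lives (all on dc1 here).
    for shardID := 0; shardID < 10; shardID++ {
        analysis.AddShardLocation(ShardLocation{
            ShardID:    shardID,
            NodeID:     "volume-1",
            Rack:       "rack-a",
            DataCenter: "dc1",
        })
    }
    analysis.Finalize()

    // With a target of 5 shards per DC, dc1 is over target and dc2 is underserved.
    dist := &ECDistribution{TargetShardsPerDC: 5}
    for _, ex := range CalculateDCExcess(analysis, dist) {
        fmt.Printf("%s %q holds %d excess shards\n", ex.Level, ex.ID, ex.Excess)
    }
    fmt.Println("underserved DCs:", CalculateUnderservedDCs(analysis, dist))
}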