chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

  Remove ~50,000 lines of unreachable code identified by static analysis.

  Major removals:
  - weed/filer/redis_lua: entire unused Redis Lua filer store implementation
  - weed/wdclient/net2, resource_pool: unused connection/resource pool packages
  - weed/plugin/worker/lifecycle: unused lifecycle plugin worker
  - weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy, multipart IAM, key rotation, and various SSE helper functions
  - weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
  - weed/mq/offset: unused SQL storage and migration code
  - weed/worker: unused registry, task, and monitoring functions
  - weed/query: unused SQL engine, parquet scanner, and type functions
  - weed/shell: unused EC proportional rebalance functions
  - weed/storage/erasure_coding/distribution: unused distribution analysis functions
  - Individual unreachable functions removed from 150+ files across admin, credential, filer, iam, kms, mount, mq, operation, pb, s3api, server, shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

  TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because the MemoryStore credential backend is a singleton registered via init(). Earlier tests that create anonymous identities pollute the shared store, causing LookupAnonymous() to unexpectedly return true. Fix by calling Reset() on the memory store before the test runs.

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

  The Plugin.sendToWorker method could panic with "send on closed channel" when a worker disconnected while a message was being sent. The race was between streamSession.close() closing the outgoing channel and sendToWorker writing to it concurrently.

  Add a done channel to streamSession that is closed before the outgoing channel, and check it in sendToWorker's select to safely detect closed sessions without panicking.
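For background on the fix(s3) commit above: when a store is a package-level singleton (registered from an init() function), state written by earlier tests survives into later ones, so a test that assumes a clean store must reset it explicitly. Below is a minimal, self-contained sketch of that pattern; the MemoryStore fields, AddIdentity, and the test name here are illustrative, not SeaweedFS's actual API.

package memstore

import (
	"sync"
	"testing"
)

// MemoryStore stands in for a singleton credential backend that a
// package's init() registers once per process.
type MemoryStore struct {
	mu         sync.Mutex
	identities map[string]bool
}

var shared = &MemoryStore{identities: map[string]bool{}}

func (s *MemoryStore) AddIdentity(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.identities[name] = true
}

func (s *MemoryStore) LookupAnonymous() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.identities["anonymous"]
}

// Reset clears all state so one test cannot leak into the next.
func (s *MemoryStore) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.identities = map[string]bool{}
}

func TestFallbackKeyWithEmptyConfig(t *testing.T) {
	shared.Reset() // earlier tests may have added an anonymous identity
	if shared.LookupAnonymous() {
		t.Fatal("expected fresh store: LookupAnonymous() returned true")
	}
}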
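The fix(plugin) commit describes a done-channel shutdown signal, a standard Go idiom for avoiding "send on closed channel" panics. The sketch below shows one safe variant of that idea, under illustrative names: unlike the commit's description, it never closes the data channel at all, so a concurrent send cannot panic by construction, and the receive loop exits via done instead of waiting for channel close.

package worker

import (
	"errors"
	"sync"
)

// streamSession pairs a data channel with a done channel that
// signals shutdown to any goroutine about to send.
type streamSession struct {
	outgoing  chan []byte
	done      chan struct{}
	closeOnce sync.Once
}

func newStreamSession() *streamSession {
	return &streamSession{
		outgoing: make(chan []byte, 16),
		done:     make(chan struct{}),
	}
}

// close signals shutdown by closing done. The outgoing channel is
// deliberately left open; it becomes garbage once nothing references it.
func (s *streamSession) close() {
	s.closeOnce.Do(func() { close(s.done) })
}

var errSessionClosed = errors.New("worker session closed")

// sendToWorker either delivers msg or reports a closed session,
// instead of panicking on a send to a closed channel.
func (s *streamSession) sendToWorker(msg []byte) error {
	select {
	case <-s.done:
		return errSessionClosed
	case s.outgoing <- msg:
		return nil
	}
}

// receiveLoop drains messages until the session is closed; buffered
// messages still in flight at shutdown are dropped in this sketch.
func (s *streamSession) receiveLoop(handle func([]byte)) {
	for {
		select {
		case <-s.done:
			return
		case msg := <-s.outgoing:
			handle(msg)
		}
	}
}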
@@ -406,30 +406,6 @@ func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
 	})
 }
 
-// if the index node changed the freeEcSlot, need to keep every EcNode still sorted
-func ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) {
-	for i := index - 1; i >= 0; i-- {
-		if lessThan(i+1, i) {
-			swap(data, i, i+1)
-		} else {
-			break
-		}
-	}
-	for i := index + 1; i < len(data); i++ {
-		if lessThan(i, i-1) {
-			swap(data, i, i-1)
-		} else {
-			break
-		}
-	}
-}
-
-func swap(data []*CandidateEcNode, i, j int) {
-	t := data[i]
-	data[i] = data[j]
-	data[j] = t
-}
-
 func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) {
 	for _, eci := range ecShardInfos {
 		count += erasure_coding.GetShardCount(eci)
@@ -1135,48 +1111,6 @@ func (ecb *ecBalancer) pickRackForShardType(
 	return selected.id, nil
 }
 
-func (ecb *ecBalancer) pickRackToBalanceShardsInto(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]int) (RackId, error) {
-	targets := []RackId{}
-	targetShards := -1
-	for _, shards := range rackToShardCount {
-		if shards > targetShards {
-			targetShards = shards
-		}
-	}
-
-	details := ""
-	for rackId, rack := range rackToEcNodes {
-		shards := rackToShardCount[string(rackId)]
-
-		if rack.freeEcSlot <= 0 {
-			details += fmt.Sprintf(" Skipped %s because it has no free slots\n", rackId)
-			continue
-		}
-		// For EC shards, replica placement constraint only applies when DiffRackCount > 0.
-		// When DiffRackCount = 0 (e.g., replica placement "000"), EC shards should be
-		// distributed freely across racks for fault tolerance - the "000" means
-		// "no volume replication needed" because erasure coding provides redundancy.
-		if ecb.replicaPlacement != nil && ecb.replicaPlacement.DiffRackCount > 0 && shards > ecb.replicaPlacement.DiffRackCount {
-			details += fmt.Sprintf(" Skipped %s because shards %d > replica placement limit for other racks (%d)\n", rackId, shards, ecb.replicaPlacement.DiffRackCount)
-			continue
-		}
-
-		if shards < targetShards {
-			// Favor racks with less shards, to ensure an uniform distribution.
-			targets = nil
-			targetShards = shards
-		}
-		if shards == targetShards {
-			targets = append(targets, rackId)
-		}
-	}
-
-	if len(targets) == 0 {
-		return "", errors.New(details)
-	}
-	return targets[rand.IntN(len(targets))], nil
-}
-
 func (ecb *ecBalancer) balanceEcShardsWithinRacks(collection string) error {
 	// collect vid => []ecNode, since previous steps can change the locations
 	vidLocations := ecb.collectVolumeIdToEcNodes(collection)
@@ -1567,46 +1501,6 @@ func (ecb *ecBalancer) pickOneEcNodeAndMoveOneShard(existingLocation *EcNode, co
 	return moveMountedShardToEcNode(ecb.commandEnv, existingLocation, collection, vid, shardId, destNode, destDiskId, ecb.applyBalancing, ecb.diskType)
 }
 
-func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int, diskType types.DiskType) map[erasure_coding.ShardId]*EcNode {
-	picked := make(map[erasure_coding.ShardId]*EcNode)
-	var candidateEcNodes []*CandidateEcNode
-	for _, ecNode := range ecNodes {
-		si := findEcVolumeShardsInfo(ecNode, vid, diskType)
-		if si.Count() > 0 {
-			candidateEcNodes = append(candidateEcNodes, &CandidateEcNode{
-				ecNode:     ecNode,
-				shardCount: si.Count(),
-			})
-		}
-	}
-	slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) int {
-		return b.shardCount - a.shardCount
-	})
-	for i := 0; i < n; i++ {
-		selectedEcNodeIndex := -1
-		for i, candidateEcNode := range candidateEcNodes {
-			si := findEcVolumeShardsInfo(candidateEcNode.ecNode, vid, diskType)
-			if si.Count() > 0 {
-				selectedEcNodeIndex = i
-				for _, shardId := range si.Ids() {
-					candidateEcNode.shardCount--
-					picked[shardId] = candidateEcNode.ecNode
-					candidateEcNode.ecNode.deleteEcVolumeShards(vid, []erasure_coding.ShardId{shardId}, diskType)
-					break
-				}
-				break
-			}
-		}
-		if selectedEcNodeIndex >= 0 {
-			ensureSortedEcNodes(candidateEcNodes, selectedEcNodeIndex, func(i, j int) bool {
-				return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount
-			})
-		}
-
-	}
-	return picked
-}
-
 func (ecb *ecBalancer) collectVolumeIdToEcNodes(collection string) map[needle.VolumeId][]*EcNode {
 	vidLocations := make(map[needle.VolumeId][]*EcNode)
 	for _, ecNode := range ecb.ecNodes {
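For context on the weed/shell removal above: the deleted ensureSortedEcNodes helper restores sortedness after exactly one element's key changes, bubbling that element toward either end in O(n) instead of re-sorting the whole slice in O(n log n). A standalone sketch of the same technique over plain ints (the names here are illustrative, not part of SeaweedFS):

package main

import "fmt"

// restoreSorted assumes data was sorted ascending except possibly at
// index, whose value just changed, and bubbles that one element back
// into place. At most one of the two loops does any work.
func restoreSorted(data []int, index int) {
	for i := index; i > 0 && data[i] < data[i-1]; i-- {
		data[i], data[i-1] = data[i-1], data[i]
	}
	for i := index; i < len(data)-1 && data[i] > data[i+1]; i++ {
		data[i], data[i+1] = data[i+1], data[i]
	}
}

func main() {
	counts := []int{1, 3, 5, 7, 9}
	counts[3] = 2 // one element changed; the slice is now out of order
	restoreSorted(counts, 3)
	fmt.Println(counts) // [1 2 3 5 9]
}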
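The deleted pickRackToBalanceShardsInto also shows a reusable balancing idiom: collect every candidate that ties for the minimum load, then pick one uniformly at random so repeated runs do not always target the same node. A minimal sketch of that idiom with generic names (not the SeaweedFS API), using the same rand.IntN the deleted code called:

package main

import (
	"errors"
	"fmt"
	"math/rand/v2"
)

// pickLeastLoaded returns one key with the minimum load, chosen
// uniformly at random among ties.
func pickLeastLoaded(load map[string]int) (string, error) {
	var ties []string
	min := -1
	for key, n := range load {
		switch {
		case min == -1 || n < min:
			min = n
			ties = []string{key} // new minimum: reset the tie list
		case n == min:
			ties = append(ties, key)
		}
	}
	if len(ties) == 0 {
		return "", errors.New("no candidates")
	}
	return ties[rand.IntN(len(ties))], nil
}

func main() {
	load := map[string]int{"rackA": 3, "rackB": 1, "rackC": 1}
	rack, _ := pickLeastLoaded(load)
	fmt.Println(rack) // rackB or rackC, chosen at random
}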