feat(balance): replica placement validation for volume moves (#8622)
* feat(balance): add replica placement validation for volume moves

When the volume balance detection proposes moving a volume, validate that the move does not violate the volume's replication policy (e.g., ReplicaPlacement=010 requires replicas on different racks). If the preferred destination violates the policy, fall back to score-based planning; if the fallback also violates the policy, skip the volume entirely.

- Add ReplicaLocation type and VolumeReplicaMap to ClusterInfo
- Build replica map from all volumes before collection filtering
- Port placement validation logic from command_volume_fix_replication.go
- Thread replica map through the collectVolumeMetrics call chain
- Add IsGoodMove check in createBalanceTask before the destination is used

* address PR review: extract validation closure, add defensive checks

- Extract a validateMove closure to eliminate duplicated ReplicaLocation construction and IsGoodMove calls
- Add a defensive check for empty replica map entries (len(replicas) == 0)
- Add a bounds check for the int-to-byte cast on ExpectedReplicas (0-255)

* address nitpick: rp test helper accepts *testing.T and fails on error

Prevents silent failures from typos in replica placement codes.

* address review: add composite replica placement tests (011, 110)

Test multi-constraint placement policies where both the rack and DC rules must be satisfied simultaneously.

* address review: use struct keys instead of string concatenation

Replace string-concatenated map keys with typed rackKey/nodeKey structs to eliminate allocations and avoid ambiguity if IDs contain spaces.

* address review: simplify bounds check, log fallback error, guard source

- Remove the unreachable ExpectedReplicas < 0 branch (the outer condition already guarantees > 0) and fold the bounds check into a single condition
- Log the error from planBalanceDestination in the replica validation fallback
- Return false from IsGoodMove when sourceNodeID is not found in the existing replicas (inconsistent cluster state)

* address review: use slices.Contains instead of hand-rolled helpers

Replace isAmongDC and isAmongRack with slices.Contains from the standard library, reducing boilerplate.
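As context for the file below, here is a minimal sketch of the check's behavior under a 010 placement (one extra replica on a different rack in the same data center). The topology, node IDs, and the wiring through package main are invented for illustration; only IsGoodMove, types.ReplicaLocation, and super_block.NewReplicaPlacementFromString come from the codebase:

    package main

    import (
        "fmt"

        "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
        "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
        "github.com/seaweedfs/seaweedfs/weed/worker/types"
    )

    func main() {
        // 010: replicas must sit on different racks within one data center.
        rp, err := super_block.NewReplicaPlacementFromString("010")
        if err != nil {
            panic(err)
        }

        // Hypothetical current replica locations of one volume.
        existing := []types.ReplicaLocation{
            {DataCenter: "dc1", Rack: "rack1", NodeID: "node-a"},
            {DataCenter: "dc1", Rack: "rack2", NodeID: "node-b"},
        }

        // Moving node-a's copy onto rack2 would co-locate both replicas on
        // one rack, so the balancer must not pick this destination.
        bad := types.ReplicaLocation{DataCenter: "dc1", Rack: "rack2", NodeID: "node-c"}
        fmt.Println(balance.IsGoodMove(rp, existing, "node-a", bad)) // false

        // Moving it to a third rack keeps the replicas rack-disjoint.
        good := types.ReplicaLocation{DataCenter: "dc1", Rack: "rack3", NodeID: "node-d"}
        fmt.Println(balance.IsGoodMove(rp, existing, "node-a", good)) // true
    }

When this check fails for the preferred destination, the detection falls back to score-based planning and re-validates; if the fallback destination also fails, the volume is skipped, as described above.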
weed/worker/tasks/balance/replica_placement.go (new file, 146 lines)
@@ -0,0 +1,146 @@
package balance

import (
    "slices"

    "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
    "github.com/seaweedfs/seaweedfs/weed/worker/types"
)

// rackKey uniquely identifies a rack within a data center.
type rackKey struct {
    DataCenter string
    Rack       string
}

// nodeKey uniquely identifies a node within a rack.
type nodeKey struct {
    DataCenter string
    Rack       string
    NodeID     string
}

// IsGoodMove checks whether moving a volume from sourceNodeID to target
// would satisfy the volume's replica placement policy, given the current
// set of replica locations.
func IsGoodMove(rp *super_block.ReplicaPlacement, existingReplicas []types.ReplicaLocation, sourceNodeID string, target types.ReplicaLocation) bool {
    if rp == nil || !rp.HasReplication() {
        return true // no replication constraint
    }

    // Build the replica set after the move: remove source, add target.
    afterMove := make([]types.ReplicaLocation, 0, len(existingReplicas))
    sourceFound := false
    for _, r := range existingReplicas {
        if r.NodeID == sourceNodeID {
            sourceFound = true
        } else {
            afterMove = append(afterMove, r)
        }
    }
    if !sourceFound {
        // Source not in replica list — cluster state may be inconsistent.
        // Treat as unsafe to avoid incorrect placement decisions.
        return false
    }

    return satisfyReplicaPlacement(rp, afterMove, target)
}

// satisfyReplicaPlacement checks whether placing a replica at target
// is consistent with the replication policy, given the existing replicas.
// Ported from weed/shell/command_volume_fix_replication.go
func satisfyReplicaPlacement(rp *super_block.ReplicaPlacement, replicas []types.ReplicaLocation, target types.ReplicaLocation) bool {
    existingDCs, _, existingNodes := countReplicas(replicas)

    targetNK := nodeKey{DataCenter: target.DataCenter, Rack: target.Rack, NodeID: target.NodeID}
    if _, found := existingNodes[targetNK]; found {
        // avoid duplicated volume on the same data node
        return false
    }

    primaryDCs, _ := findTopDCKeys(existingDCs)

    // ensure data center count is within limit
    if _, found := existingDCs[target.DataCenter]; !found {
        // different from existing dcs
        if len(existingDCs) < rp.DiffDataCenterCount+1 {
            return true
        }
        return false
    }
    // now same as one of existing data centers
    if !slices.Contains(primaryDCs, target.DataCenter) {
        return false
    }

    // now on a primary dc - check racks within this DC
    primaryDcRacks := make(map[rackKey]int)
    for _, r := range replicas {
        if r.DataCenter != target.DataCenter {
            continue
        }
        primaryDcRacks[rackKey{DataCenter: r.DataCenter, Rack: r.Rack}]++
    }

    targetRK := rackKey{DataCenter: target.DataCenter, Rack: target.Rack}
    primaryRacks, _ := findTopRackKeys(primaryDcRacks)
    sameRackCount := primaryDcRacks[targetRK]

    if _, found := primaryDcRacks[targetRK]; !found {
        // different from existing racks
        if len(primaryDcRacks) < rp.DiffRackCount+1 {
            return true
        }
        return false
    }
    // same as one of existing racks
    if !slices.Contains(primaryRacks, targetRK) {
        return false
    }

    // on primary rack - check same-rack count
    if sameRackCount < rp.SameRackCount+1 {
        return true
    }
    return false
}

func countReplicas(replicas []types.ReplicaLocation) (dcCounts map[string]int, rackCounts map[rackKey]int, nodeCounts map[nodeKey]int) {
    dcCounts = make(map[string]int)
    rackCounts = make(map[rackKey]int)
    nodeCounts = make(map[nodeKey]int)
    for _, r := range replicas {
        dcCounts[r.DataCenter]++
        rackCounts[rackKey{DataCenter: r.DataCenter, Rack: r.Rack}]++
        nodeCounts[nodeKey{DataCenter: r.DataCenter, Rack: r.Rack, NodeID: r.NodeID}]++
    }
    return
}

func findTopDCKeys(m map[string]int) (topKeys []string, max int) {
    for k, c := range m {
        if max < c {
            topKeys = topKeys[:0]
            topKeys = append(topKeys, k)
            max = c
        } else if max == c {
            topKeys = append(topKeys, k)
        }
    }
    return
}

func findTopRackKeys(m map[rackKey]int) (topKeys []rackKey, max int) {
    for k, c := range m {
        if max < c {
            topKeys = topKeys[:0]
            topKeys = append(topKeys, k)
            max = c
        } else if max == c {
            topKeys = append(topKeys, k)
        }
    }
    return
}
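The composite placement tests mentioned in the commit message (011, 110) pair a rack rule with a DC rule. Below is a hedged sketch of what one such case could look like, in the spirit of the *testing.T helper the message describes; the helper name mustParseRP and the concrete topology are illustrative assumptions, not the actual test file from this commit:

    package balance

    import (
        "testing"

        "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
        "github.com/seaweedfs/seaweedfs/weed/worker/types"
    )

    // mustParseRP fails the test on an invalid placement code instead of
    // silently returning nil (hypothetical helper, per the commit message).
    func mustParseRP(t *testing.T, code string) *super_block.ReplicaPlacement {
        t.Helper()
        rp, err := super_block.NewReplicaPlacementFromString(code)
        if err != nil {
            t.Fatalf("bad replica placement %q: %v", code, err)
        }
        return rp
    }

    func TestIsGoodMoveComposite110(t *testing.T) {
        // 110: one replica in a different data center, one on a different rack.
        rp := mustParseRP(t, "110")
        existing := []types.ReplicaLocation{
            {DataCenter: "dc1", Rack: "rack1", NodeID: "n1"},
            {DataCenter: "dc1", Rack: "rack2", NodeID: "n2"},
            {DataCenter: "dc2", Rack: "rack1", NodeID: "n3"},
        }

        // Moving n2 onto n1's rack collapses the two dc1 replicas onto one
        // rack, breaking the diff-rack constraint.
        bad := types.ReplicaLocation{DataCenter: "dc1", Rack: "rack1", NodeID: "n4"}
        if IsGoodMove(rp, existing, "n2", bad) {
            t.Error("move violating rack constraint should be rejected")
        }

        // Moving n2 to a third rack in dc1 still satisfies both constraints.
        good := types.ReplicaLocation{DataCenter: "dc1", Rack: "rack3", NodeID: "n5"}
        if !IsGoodMove(rp, existing, "n2", good) {
            t.Error("move preserving both constraints should be accepted")
        }
    }

Note the typed rackKey/nodeKey map keys in the ported checker: compared with string concatenation, they keep rack identity unambiguous even if an ID contains a separator character, and avoid building a throwaway string per lookup.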