Add volume dir tags and EC placement priority (#8472)
* Add volume dir tags to topology

* Add preferred tag config for EC

* Prioritize EC destinations by tags

* Add EC placement planner tag tests

* Refactor EC placement tests to reuse buildActiveTopology

  Remove the buildActiveTopologyWithDiskTags helper function and consolidate tag setup inline in the test cases. Tests now use UpdateTopology to apply tags after topology creation, reusing the existing buildActiveTopology function rather than duplicating its logic. All tag scenario tests pass:
  - TestECPlacementPlannerPrefersTaggedDisks
  - TestECPlacementPlannerFallsBackWhenTagsInsufficient

* Consolidate normalizeTagList into a shared util package

  Extract normalizeTagList from three locations (volume.go, detection.go, erasure_coding_handler.go) into the new weed/util/tag.go as the exported NormalizeTagList function, and replace all duplicate implementations with calls to util.NormalizeTagList. This centralizes tag normalization logic and improves reuse and maintainability.

* Add PreferredTags to EC config persistence

  Add a preferred_tags field to the ErasureCodingTaskConfig protobuf with field number 5. Update GetConfigSpec to include preferred_tags in the UI configuration schema. ToTaskPolicy serializes PreferredTags to protobuf, and FromTaskPolicy deserializes it with a defensive copy to prevent external mutation. This allows EC preferred tags to be persisted and restored across worker restarts.

* Add defensive copy for Tags slice in DiskLocation

  Copy the incoming tags slice in NewDiskLocation instead of storing it by reference. This prevents external callers from mutating DiskLocation.Tags after construction, improving encapsulation and preventing unexpected changes to disk metadata.

* Add doc comment to buildCandidateSets method

  Document the tiered candidate selection and fallback behavior: for a planner with preferredTags, the method accumulates disks matching each tag in order into progressively larger tiers, emits a candidate set once a tier reaches shardsNeeded, and finally falls back to the full candidates set if the preferred-tag tiers are insufficient. This clarifies the intended semantics for future maintainers.

* Apply final PR review fixes

  1. Update parseVolumeTags to replicate a single tag entry to all folders instead of leaving some folders with nil tags, preventing nil pointer dereferences when processing folders without explicit tags.
  2. Add a defensive copy in ToTaskPolicy for the PreferredTags slice to match the pattern used in FromTaskPolicy, preventing external mutation of the returned TaskPolicy.
  3. Add a clarifying comment in buildCandidateSets explaining that the shardsNeeded <= 0 branch is a defensive check for direct callers, since selectDestinations guarantees shardsNeeded > 0.

* Fix nil pointer dereference in parseVolumeTags

  Ensure all folder tags are initialized to either normalized tags or empty slices, never nil. When multiple tag entries are provided and there are more folders than entries, the remaining folders now get empty slices instead of nil, preventing nil pointer dereferences in downstream code.

* Fix NormalizeTagList to return an empty slice instead of nil

  Always return a non-nil slice: when all tags are empty or whitespace after normalization, return an empty slice instead of nil. This prevents nil pointer dereferences in downstream code that expects a valid (possibly empty) slice.

* Add nil safety check for the v.tags pointer

  Handle the case where v.tags might be nil by using an empty string instead, preventing a nil pointer dereference in edge cases.

* Add volume.tags flag to the weed server and weed mini commands

  Add the volume.tags CLI option to both the 'weed server' and 'weed mini' commands, so users can specify disk tags when running the combined server modes, just as they can with 'weed volume'. The flag uses the same format and description as the volume command: comma-separated tag groups per data dir with ':' separators (e.g. fast:ssd,archive).

---------

Co-authored-by: Copilot <copilot@github.com>
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
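The tag normalization contract referenced throughout the commit (NormalizeTagList in weed/util/tag.go) can be pictured with a small standalone sketch. This only illustrates the behavior described above — trim whitespace, drop empty entries, never return nil — and is not the actual SeaweedFS implementation; the helper name normalizeTags is hypothetical.

// Illustrative sketch, not part of this commit.
package main

import (
	"fmt"
	"strings"
)

// normalizeTags is a hypothetical stand-in for util.NormalizeTagList: per the
// commit message it trims whitespace, drops empty entries, and always returns
// a non-nil (possibly empty) slice.
func normalizeTags(tags []string) []string {
	normalized := make([]string, 0, len(tags))
	for _, tag := range tags {
		tag = strings.TrimSpace(tag)
		if tag == "" {
			continue
		}
		normalized = append(normalized, tag)
	}
	return normalized
}

func main() {
	fmt.Println(normalizeTags([]string{" fast ", "", "ssd"})) // [fast ssd]
	fmt.Println(normalizeTags([]string{"  ", ""}) == nil)     // false: empty result, but non-nil
}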
@@ -12,10 +12,11 @@ import (
// Config extends BaseConfig with erasure coding specific settings
type Config struct {
	base.BaseConfig
	QuietForSeconds int `json:"quiet_for_seconds"`
	FullnessRatio float64 `json:"fullness_ratio"`
	CollectionFilter string `json:"collection_filter"`
	MinSizeMB int `json:"min_size_mb"`
	QuietForSeconds int `json:"quiet_for_seconds"`
	FullnessRatio float64 `json:"fullness_ratio"`
	CollectionFilter string `json:"collection_filter"`
	MinSizeMB int `json:"min_size_mb"`
	PreferredTags []string `json:"preferred_tags"`
}

// NewDefaultConfig creates a new default erasure coding configuration
@@ -30,6 +31,7 @@ func NewDefaultConfig() *Config {
		FullnessRatio: 0.8, // 80%
		CollectionFilter: "",
		MinSizeMB: 30, // 30MB (more reasonable than 100MB)
		PreferredTags: nil,
	}
}

@@ -142,12 +144,27 @@ func GetConfigSpec() base.ConfigSpec {
				InputType: "number",
				CSSClasses: "form-control",
			},
			{
				Name: "preferred_tags",
				JSONName: "preferred_tags",
				Type: config.FieldTypeString,
				DefaultValue: "",
				Required: false,
				DisplayName: "Preferred Disk Tags",
				Description: "Comma-separated disk tags to prioritize for EC shard placement",
				HelpText: "EC shards will be placed on disks with these tags first, then fall back to other disks if needed",
				Placeholder: "fast,ssd",
				InputType: "text",
				CSSClasses: "form-control",
			},
		},
	}
}

// ToTaskPolicy converts configuration to a TaskPolicy protobuf message
func (c *Config) ToTaskPolicy() *worker_pb.TaskPolicy {
	// Defensive copy of PreferredTags to prevent external mutation
	preferredTagsCopy := append([]string(nil), c.PreferredTags...)
	return &worker_pb.TaskPolicy{
		Enabled: c.Enabled,
		MaxConcurrent: int32(c.MaxConcurrent),
@@ -159,6 +176,7 @@ func (c *Config) ToTaskPolicy() *worker_pb.TaskPolicy {
				QuietForSeconds: int32(c.QuietForSeconds),
				MinVolumeSizeMb: int32(c.MinSizeMB),
				CollectionFilter: c.CollectionFilter,
				PreferredTags: preferredTagsCopy,
			},
		},
	}
@@ -181,6 +199,7 @@ func (c *Config) FromTaskPolicy(policy *worker_pb.TaskPolicy) error {
		c.QuietForSeconds = int(ecConfig.QuietForSeconds)
		c.MinSizeMB = int(ecConfig.MinVolumeSizeMb)
		c.CollectionFilter = ecConfig.CollectionFilter
		c.PreferredTags = append([]string(nil), ecConfig.PreferredTags...)
	}

	return nil

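Both ToTaskPolicy and FromTaskPolicy above copy PreferredTags with append([]string(nil), ...). A minimal standalone sketch of why that defensive copy matters; the policy struct below is invented for illustration and is not SeaweedFS code.

// Illustrative sketch, not part of this commit.
package main

import "fmt"

type policy struct {
	PreferredTags []string
}

func main() {
	caller := []string{"fast", "ssd"}

	shared := policy{PreferredTags: caller}                           // stores the caller's backing array
	copied := policy{PreferredTags: append([]string(nil), caller...)} // defensive copy

	caller[0] = "mutated" // the caller changes its slice afterwards

	fmt.Println(shared.PreferredTags) // [mutated ssd] -- mutation leaks in
	fmt.Println(copied.PreferredTags) // [fast ssd]    -- unaffected
}
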
@@ -11,9 +11,10 @@ import (
	"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding/placement"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/seaweedfs/seaweedfs/weed/util/wildcard"
	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/util"
	workerutil "github.com/seaweedfs/seaweedfs/weed/worker/tasks/util"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)

@@ -148,7 +149,7 @@ func Detection(ctx context.Context, metrics []*types.VolumeHealthMetrics, cluste

		glog.Infof("EC Detection: ActiveTopology available, planning destinations for volume %d", metric.VolumeID)
		if planner == nil {
			planner = newECPlacementPlanner(clusterInfo.ActiveTopology)
			planner = newECPlacementPlanner(clusterInfo.ActiveTopology, ecConfig.PreferredTags)
		}
		multiPlan, err := planECDestinations(planner, metric, ecConfig)
		if err != nil {
@@ -344,21 +345,27 @@ type ecPlacementPlanner struct {
	candidates []*placement.DiskCandidate
	candidateByKey map[string]*placement.DiskCandidate
	diskStates map[string]*ecDiskState
	diskTags map[string][]string
	preferredTags []string
}

func newECPlacementPlanner(activeTopology *topology.ActiveTopology) *ecPlacementPlanner {
func newECPlacementPlanner(activeTopology *topology.ActiveTopology, preferredTags []string) *ecPlacementPlanner {
	if activeTopology == nil {
		return nil
	}

	disks := activeTopology.GetDisksWithEffectiveCapacity(topology.TaskTypeErasureCoding, "", 0)
	candidates := diskInfosToCandidates(disks)
	tagsByKey := collectDiskTags(disks)
	normalizedPreferredTags := util.NormalizeTagList(preferredTags)
	if len(candidates) == 0 {
		return &ecPlacementPlanner{
			activeTopology: activeTopology,
			candidates: candidates,
			candidateByKey: map[string]*placement.DiskCandidate{},
			diskStates: map[string]*ecDiskState{},
			diskTags: tagsByKey,
			preferredTags: normalizedPreferredTags,
		}
	}

@@ -377,6 +384,8 @@ func newECPlacementPlanner(activeTopology *topology.ActiveTopology) *ecPlacement
		candidates: candidates,
		candidateByKey: candidateByKey,
		diskStates: diskStates,
		diskTags: tagsByKey,
		preferredTags: normalizedPreferredTags,
	}
}

@@ -397,11 +406,21 @@ func (p *ecPlacementPlanner) selectDestinations(sourceRack, sourceDC string, sha
		PreferDifferentRacks: true,
	}

	result, err := placement.SelectDestinations(p.candidates, config)
	if err != nil {
		return nil, err
	var lastErr error
	for _, candidates := range p.buildCandidateSets(shardsNeeded) {
		if len(candidates) == 0 {
			continue
		}
		result, err := placement.SelectDestinations(candidates, config)
		if err == nil {
			return result.SelectedDisks, nil
		}
		lastErr = err
	}
	return result.SelectedDisks, nil
	if lastErr == nil {
		lastErr = fmt.Errorf("no EC placement candidates available")
	}
	return nil, lastErr
}

func (p *ecPlacementPlanner) applyTaskReservations(volumeSize int64, sources []topology.TaskSourceSpec, destinations []topology.TaskDestinationSpec) {
@@ -501,6 +520,77 @@ func ecDiskKey(nodeID string, diskID uint32) string {
	return fmt.Sprintf("%s:%d", nodeID, diskID)
}

func collectDiskTags(disks []*topology.DiskInfo) map[string][]string {
	tagMap := make(map[string][]string, len(disks))
	for _, disk := range disks {
		if disk == nil || disk.DiskInfo == nil {
			continue
		}
		key := ecDiskKey(disk.NodeID, disk.DiskID)
		tags := util.NormalizeTagList(disk.DiskInfo.Tags)
		if len(tags) > 0 {
			tagMap[key] = tags
		}
	}
	return tagMap
}

func diskHasTag(tags []string, tag string) bool {
	if tag == "" || len(tags) == 0 {
		return false
	}
	for _, candidate := range tags {
		if candidate == tag {
			return true
		}
	}
	return false
}

// buildCandidateSets builds tiered candidate sets for preferred-tag prioritized placement.
// For a planner with preferredTags, it accumulates disks matching each tag in order into
// progressively larger tiers. It emits a candidate set once a tier reaches shardsNeeded,
// then continues accumulating for subsequent tags. Finally, it falls back to the full
// p.candidates set if preferred-tag tiers are insufficient. This ensures tagged disks
// are selected first before falling back to all available candidates.
func (p *ecPlacementPlanner) buildCandidateSets(shardsNeeded int) [][]*placement.DiskCandidate {
	if p == nil {
		return nil
	}
	if len(p.preferredTags) == 0 {
		return [][]*placement.DiskCandidate{p.candidates}
	}
	selected := make(map[string]bool, len(p.candidates))
	var tier []*placement.DiskCandidate
	var candidateSets [][]*placement.DiskCandidate
	for _, tag := range p.preferredTags {
		for _, candidate := range p.candidates {
			key := ecDiskKey(candidate.NodeID, candidate.DiskID)
			if selected[key] {
				continue
			}
			if diskHasTag(p.diskTags[key], tag) {
				selected[key] = true
				tier = append(tier, candidate)
			}
		}
		if shardsNeeded > 0 && len(tier) >= shardsNeeded {
			candidateSets = append(candidateSets, append([]*placement.DiskCandidate(nil), tier...))
		}
	}
	// Defensive check: selectDestinations always ensures shardsNeeded > 0 before calling
	// buildCandidateSets, but this branch handles direct callers and edge cases.
	if shardsNeeded <= 0 && len(tier) > 0 {
		candidateSets = append(candidateSets, append([]*placement.DiskCandidate(nil), tier...))
	}
	if len(tier) < len(p.candidates) {
		candidateSets = append(candidateSets, p.candidates)
	} else if len(candidateSets) == 0 {
		candidateSets = append(candidateSets, p.candidates)
	}
	return candidateSets
}

// planECDestinations plans the destinations for erasure coding operation
// This function implements EC destination planning logic directly in the detection phase
func planECDestinations(planner *ecPlacementPlanner, metric *types.VolumeHealthMetrics, ecConfig *Config) (*topology.MultiDestinationPlan, error) {
@@ -550,7 +640,7 @@ func planECDestinations(planner *ecPlacementPlanner, metric *types.VolumeHealthM

	for _, disk := range selectedDisks {
		// Get the target server address
		targetAddress, err := util.ResolveServerAddress(disk.NodeID, planner.activeTopology)
		targetAddress, err := workerutil.ResolveServerAddress(disk.NodeID, planner.activeTopology)
		if err != nil {
			return nil, fmt.Errorf("failed to resolve address for target server %s: %v", disk.NodeID, err)
		}
@@ -654,7 +744,7 @@ func convertTaskSourcesToProtobuf(sources []topology.TaskSourceSpec, volumeID ui
	var protobufSources []*worker_pb.TaskSource

	for _, source := range sources {
		serverAddress, err := util.ResolveServerAddress(source.ServerID, activeTopology)
		serverAddress, err := workerutil.ResolveServerAddress(source.ServerID, activeTopology)
		if err != nil {
			return nil, fmt.Errorf("failed to resolve address for source server %s: %v", source.ServerID, err)
		}

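The tiered selection implemented by buildCandidateSets above can be illustrated independently of the planner types. A simplified sketch using plain strings instead of *placement.DiskCandidate; the buildTiers helper and its inputs are hypothetical, and it omits the shardsNeeded <= 0 defensive branch of the real code.

// Illustrative sketch, not part of this commit.
package main

import "fmt"

// buildTiers mimics the buildCandidateSets idea: walk the preferred tags in order,
// accumulate matching disks into a growing tier, emit the tier whenever it can
// satisfy shardsNeeded, and always keep the full candidate list as a fallback.
func buildTiers(candidates []string, tags map[string][]string, preferred []string, shardsNeeded int) [][]string {
	selected := map[string]bool{}
	var tier []string
	var sets [][]string
	for _, tag := range preferred {
		for _, disk := range candidates {
			if selected[disk] {
				continue
			}
			for _, t := range tags[disk] {
				if t == tag {
					selected[disk] = true
					tier = append(tier, disk)
					break
				}
			}
		}
		if len(tier) >= shardsNeeded {
			sets = append(sets, append([]string(nil), tier...))
		}
	}
	if len(tier) < len(candidates) {
		sets = append(sets, candidates) // fall back to every available disk
	}
	return sets
}

func main() {
	disks := []string{"n1:0", "n2:0", "n3:0", "n4:0"}
	tags := map[string][]string{"n1:0": {"fast"}, "n2:0": {"fast"}, "n3:0": {"ssd"}}
	// First tier: the two "fast" disks (enough for 2 shards); the second tier adds
	// the "ssd" disk; the full list remains the final fallback.
	fmt.Println(buildTiers(disks, tags, []string{"fast", "ssd"}, 2))
}
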
@@ -17,7 +17,7 @@ import (
func TestECPlacementPlannerApplyReservations(t *testing.T) {
	activeTopology := buildActiveTopology(t, 1, []string{"hdd"}, 10, 0)

	planner := newECPlacementPlanner(activeTopology)
	planner := newECPlacementPlanner(activeTopology, nil)
	require.NotNil(t, planner)

	key := ecDiskKey("10.0.0.1:8080", 0)
@@ -47,7 +47,7 @@ func TestECPlacementPlannerApplyReservations(t *testing.T) {

func TestPlanECDestinationsUsesPlanner(t *testing.T) {
	activeTopology := buildActiveTopology(t, 7, []string{"hdd", "ssd"}, 100, 0)
	planner := newECPlacementPlanner(activeTopology)
	planner := newECPlacementPlanner(activeTopology, nil)
	require.NotNil(t, planner)

	metric := &types.VolumeHealthMetrics{
@@ -63,6 +63,70 @@ func TestPlanECDestinationsUsesPlanner(t *testing.T) {
	assert.Equal(t, erasure_coding.TotalShardsCount, len(plan.Plans))
}

func TestECPlacementPlannerPrefersTaggedDisks(t *testing.T) {
	activeTopology := buildActiveTopology(t, 3, []string{"hdd"}, 10, 0)
	topo := activeTopology.GetTopologyInfo()
	for _, dc := range topo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for k, node := range rack.DataNodeInfos {
				for diskType := range node.DiskInfos {
					if k < 2 {
						node.DiskInfos[diskType].Tags = []string{"fast"}
					} else {
						node.DiskInfos[diskType].Tags = []string{"slow"}
					}
				}
			}
		}
	}
	require.NoError(t, activeTopology.UpdateTopology(topo))

	planner := newECPlacementPlanner(activeTopology, []string{"fast"})
	require.NotNil(t, planner)

	selected, err := planner.selectDestinations("", "", 2)
	require.NoError(t, err)
	require.Len(t, selected, 2)

	for _, candidate := range selected {
		key := ecDiskKey(candidate.NodeID, candidate.DiskID)
		assert.True(t, diskHasTag(planner.diskTags[key], "fast"))
	}
}

func TestECPlacementPlannerFallsBackWhenTagsInsufficient(t *testing.T) {
	activeTopology := buildActiveTopology(t, 3, []string{"hdd"}, 10, 0)
	topo := activeTopology.GetTopologyInfo()
	for _, dc := range topo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for i, node := range rack.DataNodeInfos {
				for diskType := range node.DiskInfos {
					if i == 0 {
						node.DiskInfos[diskType].Tags = []string{"fast"}
					}
				}
			}
		}
	}
	require.NoError(t, activeTopology.UpdateTopology(topo))

	planner := newECPlacementPlanner(activeTopology, []string{"fast"})
	require.NotNil(t, planner)

	selected, err := planner.selectDestinations("", "", 3)
	require.NoError(t, err)
	require.Len(t, selected, 3)

	taggedCount := 0
	for _, candidate := range selected {
		key := ecDiskKey(candidate.NodeID, candidate.DiskID)
		if diskHasTag(planner.diskTags[key], "fast") {
			taggedCount++
		}
	}
	assert.Less(t, taggedCount, len(selected))
}

func TestDetectionContextCancellation(t *testing.T) {
	activeTopology := buildActiveTopology(t, 5, []string{"hdd", "ssd"}, 50, 0)
	clusterInfo := &types.ClusterInfo{ActiveTopology: activeTopology}
@@ -88,7 +152,7 @@ func TestDetectionMaxResultsHonorsLimit(t *testing.T) {

func TestPlanECDestinationsFailsWithInsufficientCapacity(t *testing.T) {
	activeTopology := buildActiveTopology(t, 1, []string{"hdd"}, 1, 1)
	planner := newECPlacementPlanner(activeTopology)
	planner := newECPlacementPlanner(activeTopology, nil)
	require.NotNil(t, planner)

	metric := &types.VolumeHealthMetrics{
