Admin UI: include ec shard sizes into volume server info (#7071)
* show ec shards on dashboard, show max in its own column
* master collect shard size info
* master send shard size via VolumeList
* change to more efficient shard sizes slice
* include ec shard sizes into volume server info
* Eliminated Redundant gRPC Calls
* much more efficient
* Efficient Counting: bits.OnesCount32() uses CPU-optimized instructions to count set bits in O(1)
* avoid extra volume list call
* simplify
* preserve existing shard sizes
* avoid hard coded value
* Update weed/storage/erasure_coding/ec_volume_info.go
  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update weed/admin/dash/volume_management.go
  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update ec_volume_info.go
* address comments
* avoid duplicated functions
* Update weed/admin/dash/volume_management.go
  Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
* simplify
* refactoring
* fix compilation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
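Several of the bullets above (the "more efficient shard sizes slice", "preserve existing shard sizes", and the bits.OnesCount32() note) describe tracking a volume server's EC shards as a 32-bit bitmask with a parallel, compact slice of shard sizes. The Go sketch below illustrates that idea only; the names ShardBits, ShardIdCount, EcVolumeInfo, ShardSizes and ShardSize loosely follow SeaweedFS naming but the implementation is illustrative, not a copy of weed/storage/erasure_coding/ec_volume_info.go.

package main

import (
	"fmt"
	"math/bits"
)

// ShardBits is a bitmask of which EC shards (0..31) a server holds.
type ShardBits uint32

// ShardIdCount returns how many shards are present, using the CPU popcount
// instruction via bits.OnesCount32 instead of looping over all 32 bits.
func (b ShardBits) ShardIdCount() int {
	return bits.OnesCount32(uint32(b))
}

// EcVolumeInfo pairs the shard bitmask with a compact slice of sizes:
// ShardSizes[i] is the size of the i-th set bit of ShardBits, in ascending
// shard-id order, so absent shards cost no space in the message.
type EcVolumeInfo struct {
	ShardBits  ShardBits
	ShardSizes []int64
}

// ShardSize looks up the size of one shard id by counting how many lower
// shard ids are present; that masked popcount is the index into ShardSizes.
func (v *EcVolumeInfo) ShardSize(shardId uint32) (int64, bool) {
	if v.ShardBits&(1<<shardId) == 0 {
		return 0, false
	}
	lowerMask := uint32(1)<<shardId - 1
	idx := bits.OnesCount32(uint32(v.ShardBits) & lowerMask)
	return v.ShardSizes[idx], true
}

func main() {
	// A server holding shards 0, 3 and 7, sizes listed in ascending shard-id order.
	info := EcVolumeInfo{
		ShardBits:  1<<0 | 1<<3 | 1<<7,
		ShardSizes: []int64{100, 250, 300},
	}
	fmt.Println(info.ShardBits.ShardIdCount()) // 3
	fmt.Println(info.ShardSize(3))             // 250 true
}

Storing one size per set bit keeps the VolumeList payload small, and a lookup needs only a masked popcount rather than a fixed 32-entry array.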
@@ -168,19 +168,16 @@ func planECDestinations(activeTopology *topology.ActiveTopology, metric *types.V
 		}
 	}
 
-	// Determine minimum shard disk locations based on configuration
-	minTotalDisks := 4
-
 	// Get available disks for EC placement (include source node for EC)
 	availableDisks := activeTopology.GetAvailableDisks(topology.TaskTypeErasureCoding, "")
-	if len(availableDisks) < minTotalDisks {
-		return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", minTotalDisks, len(availableDisks))
+	if len(availableDisks) < erasure_coding.MinTotalDisks {
+		return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", erasure_coding.MinTotalDisks, len(availableDisks))
 	}
 
 	// Select best disks for EC placement with rack/DC diversity
 	selectedDisks := selectBestECDestinations(availableDisks, sourceRack, sourceDC, erasure_coding.TotalShardsCount)
-	if len(selectedDisks) < minTotalDisks {
-		return nil, fmt.Errorf("found %d disks, but could not find %d suitable destinations for EC placement", len(selectedDisks), minTotalDisks)
+	if len(selectedDisks) < erasure_coding.MinTotalDisks {
+		return nil, fmt.Errorf("found %d disks, but could not find %d suitable destinations for EC placement", len(selectedDisks), erasure_coding.MinTotalDisks)
 	}
 
 	var plans []*topology.DestinationPlan
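The hunk above replaces a hard-coded minimum of 4 disks with the named constant erasure_coding.MinTotalDisks ("avoid hard coded value" in the commit message). For orientation, here is a sketch of how that constant could sit next to the shard-count constants; the 10+4 data/parity split matches SeaweedFS's Reed-Solomon layout, but this block is an assumption-labelled sketch, not the verbatim constants file.

// Package erasure_coding (sketch): named constants referenced by the diff above.
package erasure_coding

const (
	DataShardsCount   = 10 // data shards per EC volume
	ParityShardsCount = 4  // parity shards per EC volume
	TotalShardsCount  = DataShardsCount + ParityShardsCount

	// MinTotalDisks is the minimum number of distinct disks required before
	// EC placement is attempted; it carries the value that was previously
	// written as a literal 4 in planECDestinations.
	MinTotalDisks = 4
)

Keeping the minimum as a package-level constant means the placement check and its error message can no longer drift apart, as two separate literals could.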
@@ -81,9 +81,6 @@ func (t *ErasureCodingTask) Execute(ctx context.Context, params *worker_pb.TaskP
 
 	// Use the working directory from task parameters, or fall back to a default
 	baseWorkDir := t.workDir
-	if baseWorkDir == "" {
-		baseWorkDir = "/tmp/seaweedfs_ec_work"
-	}
 
 	// Create unique working directory for this task
 	taskWorkDir := filepath.Join(baseWorkDir, fmt.Sprintf("vol_%d_%d", t.volumeID, time.Now().Unix()))
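For context on this second hunk: the task derives a per-run working directory from the configured base directory plus the volume id and the current Unix timestamp. The sketch below shows that naming pattern together with the create/cleanup steps that usually surround it; prepareWorkDir and its error message are hypothetical helpers, and only the filepath.Join/fmt.Sprintf line mirrors the diff.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// prepareWorkDir builds a unique directory name for one EC task run,
// <base>/vol_<volumeID>_<unixTimestamp>, and creates it on disk.
func prepareWorkDir(baseWorkDir string, volumeID uint32) (string, error) {
	taskWorkDir := filepath.Join(baseWorkDir, fmt.Sprintf("vol_%d_%d", volumeID, time.Now().Unix()))
	if err := os.MkdirAll(taskWorkDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create task work directory %s: %v", taskWorkDir, err)
	}
	return taskWorkDir, nil
}

func main() {
	dir, err := prepareWorkDir(os.TempDir(), 1234)
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir) // remove the per-run directory once the task is done
	fmt.Println("working in", dir)
}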