Admin UI: include ec shard sizes into volume server info (#7071)
* show ec shards on dashboard, show max in its own column
* master collect shard size info
* master send shard size via VolumeList
* change to more efficient shard sizes slice
* include ec shard sizes into volume server info
* Eliminated Redundant gRPC Calls
* much more efficient
* Efficient Counting: bits.OnesCount32() uses CPU-optimized instructions to count set bits in O(1)
* avoid extra volume list call
* simplify
* preserve existing shard sizes
* avoid hard coded value
* Update weed/storage/erasure_coding/ec_volume_info.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/admin/dash/volume_management.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update ec_volume_info.go
* address comments
* avoid duplicated functions
* Update weed/admin/dash/volume_management.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* simplify
* refactoring
* fix compilation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
This commit is contained in:
@@ -17,14 +17,18 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
|
||||
// convert into in memory struct storage.VolumeInfo
|
||||
var shards []*erasure_coding.EcVolumeInfo
|
||||
for _, shardInfo := range shardInfos {
|
||||
shards = append(shards,
|
||||
erasure_coding.NewEcVolumeInfo(
|
||||
shardInfo.DiskType,
|
||||
shardInfo.Collection,
|
||||
needle.VolumeId(shardInfo.Id),
|
||||
erasure_coding.ShardBits(shardInfo.EcIndexBits),
|
||||
shardInfo.ExpireAtSec,
|
||||
shardInfo.DiskId))
|
||||
// Create EcVolumeInfo directly with optimized format
|
||||
ecVolumeInfo := &erasure_coding.EcVolumeInfo{
|
||||
VolumeId: needle.VolumeId(shardInfo.Id),
|
||||
Collection: shardInfo.Collection,
|
||||
ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
|
||||
DiskType: shardInfo.DiskType,
|
||||
DiskId: shardInfo.DiskId,
|
||||
ExpireAtSec: shardInfo.ExpireAtSec,
|
||||
ShardSizes: shardInfo.ShardSizes,
|
||||
}
|
||||
|
||||
shards = append(shards, ecVolumeInfo)
|
||||
}
|
||||
// find out the delta volumes
|
||||
newShards, deletedShards = dn.UpdateEcShards(shards)
|
||||
@@ -41,24 +45,32 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
|
||||
// convert into in memory struct storage.VolumeInfo
|
||||
var newShards, deletedShards []*erasure_coding.EcVolumeInfo
|
||||
for _, shardInfo := range newEcShards {
|
||||
newShards = append(newShards,
|
||||
erasure_coding.NewEcVolumeInfo(
|
||||
shardInfo.DiskType,
|
||||
shardInfo.Collection,
|
||||
needle.VolumeId(shardInfo.Id),
|
||||
erasure_coding.ShardBits(shardInfo.EcIndexBits),
|
||||
shardInfo.ExpireAtSec,
|
||||
shardInfo.DiskId))
|
||||
// Create EcVolumeInfo directly with optimized format
|
||||
ecVolumeInfo := &erasure_coding.EcVolumeInfo{
|
||||
VolumeId: needle.VolumeId(shardInfo.Id),
|
||||
Collection: shardInfo.Collection,
|
||||
ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
|
||||
DiskType: shardInfo.DiskType,
|
||||
DiskId: shardInfo.DiskId,
|
||||
ExpireAtSec: shardInfo.ExpireAtSec,
|
||||
ShardSizes: shardInfo.ShardSizes,
|
||||
}
|
||||
|
||||
newShards = append(newShards, ecVolumeInfo)
|
||||
}
|
||||
for _, shardInfo := range deletedEcShards {
|
||||
deletedShards = append(deletedShards,
|
||||
erasure_coding.NewEcVolumeInfo(
|
||||
shardInfo.DiskType,
|
||||
shardInfo.Collection,
|
||||
needle.VolumeId(shardInfo.Id),
|
||||
erasure_coding.ShardBits(shardInfo.EcIndexBits),
|
||||
shardInfo.ExpireAtSec,
|
||||
shardInfo.DiskId))
|
||||
// Create EcVolumeInfo directly with optimized format
|
||||
ecVolumeInfo := &erasure_coding.EcVolumeInfo{
|
||||
VolumeId: needle.VolumeId(shardInfo.Id),
|
||||
Collection: shardInfo.Collection,
|
||||
ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
|
||||
DiskType: shardInfo.DiskType,
|
||||
DiskId: shardInfo.DiskId,
|
||||
ExpireAtSec: shardInfo.ExpireAtSec,
|
||||
ShardSizes: shardInfo.ShardSizes,
|
||||
}
|
||||
|
||||
deletedShards = append(deletedShards, ecVolumeInfo)
|
||||
}
|
||||
|
||||
dn.DeltaUpdateEcShards(newShards, deletedShards)
|
||||
@@ -69,7 +81,6 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
|
||||
for _, v := range deletedShards {
|
||||
t.UnRegisterEcShards(v, dn)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewEcShardLocations(collection string) *EcShardLocations {
|
||||
@@ -178,6 +189,4 @@ func (t *Topology) DeleteEcCollection(collection string) {
|
||||
for _, vid := range vids {
|
||||
delete(t.ecShardMap, vid)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user