Fix reporting of EC shard sizes from nodes to masters. (#7835)

SeaweedFS tracks EC shard sizes in its topology data structures, but this information is never
relayed to master servers. As a result, commands that report disk usage, such as `volume.list`
and `cluster.status`, yield incorrect figures when EC shards are present.
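
The gist of the fix is to carry per-shard sizes alongside the shard-id set all the way to the
master, so that EC shards can be folded into disk-usage totals. As a rough illustration only
(the diffs below replace the old `ShardBits`/`ShardSizes` pair with an `erasure_coding.ShardsInfo`
value; the names in this sketch are made up), the per-volume bookkeeping boils down to a
shard-id-to-size map:

```go
// Illustrative sketch only: per-volume EC shard bookkeeping as a map from
// shard id to shard size in bytes. The real code uses erasure_coding.ShardsInfo.
type ecShardSizes map[int]uint64

// count reports how many shards of the volume live on a node/disk.
func (s ecShardSizes) count() int { return len(s) }

// total reports the disk space those shards occupy, i.e. the figure that was
// previously missing from the totals printed by volume.list.
func (s ecShardSizes) total() (sum uint64) {
	for _, size := range s {
		sum += size
	}
	return
}
```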

As an example, for a simple 5-node test cluster, before the fix...

```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:3  size:88967096  file_count:172  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[1 5]
        Disk hdd total size:88967096 file_count:172
      DataNode 192.168.10.111:9001 total size:88967096 file_count:172
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9002 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:77267536  file_count:166  replica_placement:2  version:3  modified_at_second:1766349617
          volume id:3  size:88967096  file_count:172  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[0 4]
        Disk hdd total size:166234632 file_count:338
      DataNode 192.168.10.111:9002 total size:166234632 file_count:338
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9003 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:2  size:77267536  file_count:166  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[2 6]
        Disk hdd total size:77267536 file_count:166
      DataNode 192.168.10.111:9003 total size:77267536 file_count:166
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:77267536  file_count:166  replica_placement:2  version:3  modified_at_second:1766349617
          volume id:3  size:88967096  file_count:172  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[3 7]
        Disk hdd total size:166234632 file_count:338
      DataNode 192.168.10.111:9004 total size:166234632 file_count:338
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
        Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
          ec volume id:1 collection: shards:[8 9 10 11 12 13]
        Disk hdd total size:0 file_count:0
    Rack DefaultRack total size:498703896 file_count:1014
  DataCenter DefaultDataCenter total size:498703896 file_count:1014
total size:498703896 file_count:1014
```

...and after:

```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:2  size:81761800  file_count:161  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[1 5 9] sizes:[1:8.00 MiB 5:8.00 MiB 9:8.00 MiB] total:24.00 MiB
        Disk hdd total size:81761800 file_count:161
      DataNode 192.168.10.111:9001 total size:81761800 file_count:161
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9002 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:3  size:88678712  file_count:170  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[11 12 13] sizes:[11:8.00 MiB 12:8.00 MiB 13:8.00 MiB] total:24.00 MiB
        Disk hdd total size:88678712 file_count:170
      DataNode 192.168.10.111:9002 total size:88678712 file_count:170
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9003 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:81761800  file_count:161  replica_placement:2  version:3  modified_at_second:1766349495
          volume id:3  size:88678712  file_count:170  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[0 4 8] sizes:[0:8.00 MiB 4:8.00 MiB 8:8.00 MiB] total:24.00 MiB
        Disk hdd total size:170440512 file_count:331
      DataNode 192.168.10.111:9003 total size:170440512 file_count:331
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:81761800  file_count:161  replica_placement:2  version:3  modified_at_second:1766349495
          volume id:3  size:88678712  file_count:170  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[2 6 10] sizes:[2:8.00 MiB 6:8.00 MiB 10:8.00 MiB] total:24.00 MiB
        Disk hdd total size:170440512 file_count:331
      DataNode 192.168.10.111:9004 total size:170440512 file_count:331
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
        Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
          ec volume id:1 collection: shards:[3 7] sizes:[3:8.00 MiB 7:8.00 MiB] total:16.00 MiB
        Disk hdd total size:0 file_count:0
    Rack DefaultRack total size:511321536 file_count:993
  DataCenter DefaultDataCenter total size:511321536 file_count:993
total size:511321536 file_count:993
```

Commit 6b98b52acc by Lisandro Pin, committed by GitHub on 2025-12-29 04:30:42 +01:00 (parent 2b529e310d). 28 changed files with 801 additions and 773 deletions.

```diff
@@ -35,18 +35,18 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo)
         if actualEcShards, ok := actualEcShardMap[vid]; !ok {
             // dn registered ec shards not found in the new set of ec shards
             deletedShards = append(deletedShards, ecShards)
-            deletedShardCount += ecShards.ShardIdCount()
+            deletedShardCount += ecShards.ShardsInfo.Count()
         } else {
             // found, but maybe the actual shard could be missing
             a := actualEcShards.Minus(ecShards)
-            if a.ShardIdCount() > 0 {
+            if a.ShardsInfo.Count() > 0 {
                 newShards = append(newShards, a)
-                newShardCount += a.ShardIdCount()
+                newShardCount += a.ShardsInfo.Count()
             }
             d := ecShards.Minus(actualEcShards)
-            if d.ShardIdCount() > 0 {
+            if d.ShardsInfo.Count() > 0 {
                 deletedShards = append(deletedShards, d)
-                deletedShardCount += d.ShardIdCount()
+                deletedShardCount += d.ShardsInfo.Count()
             }
         }
@@ -67,7 +67,7 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo)
         disk := dn.getOrCreateDisk(ecShards.DiskType)
         disk.UpAdjustDiskUsageDelta(types.ToDiskType(ecShards.DiskType), &DiskUsageCounts{
-            ecShardCount: int64(ecShards.ShardIdCount()),
+            ecShardCount: int64(ecShards.ShardsInfo.Count()),
         })
     }
@@ -106,7 +106,6 @@ func (dn *DataNode) doUpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo
 }
 func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
     for _, newShard := range newShards {
         dn.AddOrUpdateEcShard(newShard)
     }
```
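
For context, `Minus` above computes a set difference between two shard sets keyed by shard id.
With per-shard sizes in the picture, the operation looks roughly like the following map-based
sketch (hypothetical helper, not the actual `EcVolumeInfo.Minus`):

```go
// Hypothetical sketch: the shards present in a but not in b, keeping a's sizes.
// The real EcVolumeInfo.Minus operates on ShardsInfo, not on raw maps.
func minus(a, b map[int]uint64) map[int]uint64 {
	out := make(map[int]uint64)
	for id, size := range a {
		if _, ok := b[id]; !ok {
			out[id] = size
		}
	}
	return out
}
```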

```diff
@@ -22,20 +22,18 @@ func (d *Disk) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
     delta := 0
     if existing, ok := d.ecShards[s.VolumeId]; !ok {
         d.ecShards[s.VolumeId] = s
-        delta = s.ShardBits.ShardIdCount()
+        delta = s.ShardsInfo.Count()
     } else {
-        oldCount := existing.ShardBits.ShardIdCount()
-        existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
-        delta = existing.ShardBits.ShardIdCount() - oldCount
+        oldCount := existing.ShardsInfo.Count()
+        existing.ShardsInfo.Add(s.ShardsInfo)
+        delta = existing.ShardsInfo.Count() - oldCount
     }
-    if delta == 0 {
-        return
+    if delta != 0 {
+        d.UpAdjustDiskUsageDelta(types.ToDiskType(string(d.Id())), &DiskUsageCounts{
+            ecShardCount: int64(delta),
+        })
     }
-    d.UpAdjustDiskUsageDelta(types.ToDiskType(string(d.Id())), &DiskUsageCounts{
-        ecShardCount: int64(delta),
-    })
 }
 func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
@@ -43,17 +41,16 @@ func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
     defer d.ecShardsLock.Unlock()
     if existing, ok := d.ecShards[s.VolumeId]; ok {
-        oldCount := existing.ShardBits.ShardIdCount()
-        existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
-        delta := existing.ShardBits.ShardIdCount() - oldCount
+        oldCount := existing.ShardsInfo.Count()
+        existing.ShardsInfo.Subtract(s.ShardsInfo)
+        delta := existing.ShardsInfo.Count() - oldCount
         if delta != 0 {
             d.UpAdjustDiskUsageDelta(types.ToDiskType(string(d.Id())), &DiskUsageCounts{
                 ecShardCount: int64(delta),
             })
         }
-        if existing.ShardBits.ShardIdCount() == 0 {
+        if existing.ShardsInfo.Count() == 0 {
             delete(d.ecShards, s.VolumeId)
         }
     }
@@ -61,7 +58,6 @@ func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
 }
 func (d *Disk) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
     // check whether normal volumes has this volume id
     d.RLock()
     _, ok := d.volumes[id]
@@ -83,5 +79,4 @@ func (d *Disk) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
     d.ecShardsLock.RUnlock()
     return
 }
```
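
Both paths above follow the same pattern: merge or subtract the incoming shard set, then push the
resulting shard-count delta into the disk-usage counters only when it is non-zero. A map-based
sketch of the merge and subtract steps (hypothetical helpers; the real methods are
`ShardsInfo.Add` and `ShardsInfo.Subtract`, and whether incoming sizes overwrite existing ones is
an assumption here):

```go
// Hypothetical sketches of the merge/subtract steps used above. add assumes
// incoming sizes win, on the idea that the reporting node has fresher data.
func add(dst, src map[int]uint64) {
	for id, size := range src {
		dst[id] = size // union of shard ids, refreshed sizes
	}
}

func subtract(dst, src map[int]uint64) {
	for id := range src {
		delete(dst, id) // drop shards the node no longer holds
	}
}
```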

```diff
@@ -22,11 +22,10 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
         ecVolumeInfo := &erasure_coding.EcVolumeInfo{
             VolumeId: needle.VolumeId(shardInfo.Id),
             Collection: shardInfo.Collection,
-            ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
+            ShardsInfo: erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
             DiskType: shardInfo.DiskType,
             DiskId: shardInfo.DiskId,
             ExpireAtSec: shardInfo.ExpireAtSec,
-            ShardSizes: shardInfo.ShardSizes,
         }
         shards = append(shards, ecVolumeInfo)
@@ -50,11 +49,10 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
         ecVolumeInfo := &erasure_coding.EcVolumeInfo{
             VolumeId: needle.VolumeId(shardInfo.Id),
             Collection: shardInfo.Collection,
-            ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
+            ShardsInfo: erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
             DiskType: shardInfo.DiskType,
             DiskId: shardInfo.DiskId,
             ExpireAtSec: shardInfo.ExpireAtSec,
-            ShardSizes: shardInfo.ShardSizes,
         }
         newShards = append(newShards, ecVolumeInfo)
@@ -64,11 +62,10 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
         ecVolumeInfo := &erasure_coding.EcVolumeInfo{
             VolumeId: needle.VolumeId(shardInfo.Id),
             Collection: shardInfo.Collection,
-            ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
+            ShardsInfo: erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
             DiskType: shardInfo.DiskType,
             DiskId: shardInfo.DiskId,
             ExpireAtSec: shardInfo.ExpireAtSec,
-            ShardSizes: shardInfo.ShardSizes,
         }
         deletedShards = append(deletedShards, ecVolumeInfo)
@@ -124,31 +121,31 @@ func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *Dat
     return true
 }
-func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
+func (t *Topology) RegisterEcShards(ecvi *erasure_coding.EcVolumeInfo, dn *DataNode) {
     t.ecShardMapLock.Lock()
     defer t.ecShardMapLock.Unlock()
-    locations, found := t.ecShardMap[ecShardInfos.VolumeId]
+    locations, found := t.ecShardMap[ecvi.VolumeId]
     if !found {
-        locations = NewEcShardLocations(ecShardInfos.Collection)
-        t.ecShardMap[ecShardInfos.VolumeId] = locations
+        locations = NewEcShardLocations(ecvi.Collection)
+        t.ecShardMap[ecvi.VolumeId] = locations
     }
-    for _, shardId := range ecShardInfos.ShardIds() {
+    for _, shardId := range ecvi.ShardsInfo.Ids() {
         locations.AddShard(shardId, dn)
     }
 }
-func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
-    glog.Infof("removing ec shard info:%+v", ecShardInfos)
+func (t *Topology) UnRegisterEcShards(ecvi *erasure_coding.EcVolumeInfo, dn *DataNode) {
+    glog.Infof("removing ec shard info:%+v", ecvi)
     t.ecShardMapLock.Lock()
     defer t.ecShardMapLock.Unlock()
-    locations, found := t.ecShardMap[ecShardInfos.VolumeId]
+    locations, found := t.ecShardMap[ecvi.VolumeId]
     if !found {
         return
     }
-    for _, shardId := range ecShardInfos.ShardIds() {
+    for _, shardId := range ecvi.ShardsInfo.Ids() {
         locations.DeleteShard(shardId, dn)
     }
 }
```
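
The conversion from the heartbeat message is centralized in
`erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage`, which picks up both the shard-id
bitmap and the reported sizes. A rough sketch of what such a decoding involves, assuming
`ShardSizes` is ordered to match the set bits of `EcIndexBits` in ascending shard-id order (an
assumption about the wire format, not something this PR text confirms):

```go
// Hypothetical decoder: pair each set bit of EcIndexBits with the next entry of
// ShardSizes to recover a shard id -> size mapping. The ascending-shard-id
// ordering of ShardSizes is assumed; the real conversion is
// erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage.
func decodeShardSizes(ecIndexBits uint32, shardSizes []int64) map[int]int64 {
	sizes := make(map[int]int64)
	next := 0
	for id := 0; id < 32; id++ {
		if ecIndexBits&(1<<uint(id)) == 0 {
			continue
		}
		if next < len(shardSizes) {
			sizes[id] = shardSizes[next]
			next++
		}
	}
	return sizes
}
```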

```diff
@@ -351,10 +351,12 @@ func TestListCollections(t *testing.T) {
     topo.RegisterEcShards(&erasure_coding.EcVolumeInfo{
         VolumeId: needle.VolumeId(4444),
         Collection: "ec_collection_a",
+        ShardsInfo: erasure_coding.NewShardsInfo(),
     }, dn)
     topo.RegisterEcShards(&erasure_coding.EcVolumeInfo{
         VolumeId: needle.VolumeId(5555),
         Collection: "ec_collection_b",
+        ShardsInfo: erasure_coding.NewShardsInfo(),
     }, dn)
     testCases := []struct {
```