Fix reporting of EC shard sizes from nodes to masters. (#7835)
SeaweedFS tracks EC shard sizes in its topology data structures, but this information is never
relayed to the master servers :( As a result, commands that report disk usage, such
as `volume.list` and `cluster.status`, yield incorrect figures when EC shards are present.
As an example, on a simple 5-node test cluster, before...
```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[1 5]
Disk hdd total size:88967096 file_count:172
DataNode 192.168.10.111:9001 total size:88967096 file_count:172
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9002 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[0 4]
Disk hdd total size:166234632 file_count:338
DataNode 192.168.10.111:9002 total size:166234632 file_count:338
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9003 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[2 6]
Disk hdd total size:77267536 file_count:166
DataNode 192.168.10.111:9003 total size:77267536 file_count:166
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[3 7]
Disk hdd total size:166234632 file_count:338
DataNode 192.168.10.111:9004 total size:166234632 file_count:338
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
ec volume id:1 collection: shards:[8 9 10 11 12 13]
Disk hdd total size:0 file_count:0
Rack DefaultRack total size:498703896 file_count:1014
DataCenter DefaultDataCenter total size:498703896 file_count:1014
total size:498703896 file_count:1014
```
...and after:
```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[1 5 9] sizes:[1:8.00 MiB 5:8.00 MiB 9:8.00 MiB] total:24.00 MiB
Disk hdd total size:81761800 file_count:161
DataNode 192.168.10.111:9001 total size:81761800 file_count:161
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9002 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[11 12 13] sizes:[11:8.00 MiB 12:8.00 MiB 13:8.00 MiB] total:24.00 MiB
Disk hdd total size:88678712 file_count:170
DataNode 192.168.10.111:9002 total size:88678712 file_count:170
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9003 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[0 4 8] sizes:[0:8.00 MiB 4:8.00 MiB 8:8.00 MiB] total:24.00 MiB
Disk hdd total size:170440512 file_count:331
DataNode 192.168.10.111:9003 total size:170440512 file_count:331
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[2 6 10] sizes:[2:8.00 MiB 6:8.00 MiB 10:8.00 MiB] total:24.00 MiB
Disk hdd total size:170440512 file_count:331
DataNode 192.168.10.111:9004 total size:170440512 file_count:331
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
ec volume id:1 collection: shards:[3 7] sizes:[3:8.00 MiB 7:8.00 MiB] total:16.00 MiB
Disk hdd total size:0 file_count:0
Rack DefaultRack total size:511321536 file_count:993
DataCenter DefaultDataCenter total size:511321536 file_count:993
total size:511321536 file_count:993
```
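With the per-shard sizes reaching the master, the shell can print the `sizes:[…] total:` figures above and fold them into the disk totals. As a rough illustration of that roll-up (a standalone sketch, not the actual SeaweedFS types; `ecShardSizes` and its `total` method are invented names), summing one node's shard sizes reproduces the 24.00 MiB total shown for the first data node:

```go
// Standalone sketch of the size roll-up, not the actual SeaweedFS types:
// ecShardSizes and total are invented names for illustration only.
package main

import "fmt"

// ecShardSizes maps an EC shard id to its size in bytes on one data node.
type ecShardSizes map[int]int64

// total sums the sizes of all shards held by the node.
func (s ecShardSizes) total() int64 {
    var sum int64
    for _, size := range s {
        sum += size
    }
    return sum
}

func main() {
    // Shards 1, 5 and 9 of EC volume 1, 8 MiB each, as on 192.168.10.111:9001 above.
    node := ecShardSizes{1: 8 << 20, 5: 8 << 20, 9: 8 << 20}
    fmt.Printf("ec total: %.2f MiB\n", float64(node.total())/(1<<20)) // ec total: 24.00 MiB
}
```

The topology-side portion of the change replaces the separate `ShardBits` and `ShardSizes` fields of `erasure_coding.EcVolumeInfo` with a single `ShardsInfo` value and switches shard iteration to `ShardsInfo.Ids()`: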
```
@@ -22,11 +22,10 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
         ecVolumeInfo := &erasure_coding.EcVolumeInfo{
             VolumeId:    needle.VolumeId(shardInfo.Id),
             Collection:  shardInfo.Collection,
-            ShardBits:   erasure_coding.ShardBits(shardInfo.EcIndexBits),
+            ShardsInfo:  erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
             DiskType:    shardInfo.DiskType,
             DiskId:      shardInfo.DiskId,
             ExpireAtSec: shardInfo.ExpireAtSec,
-            ShardSizes:  shardInfo.ShardSizes,
         }
 
         shards = append(shards, ecVolumeInfo)
@@ -50,11 +49,10 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
         ecVolumeInfo := &erasure_coding.EcVolumeInfo{
             VolumeId:    needle.VolumeId(shardInfo.Id),
             Collection:  shardInfo.Collection,
-            ShardBits:   erasure_coding.ShardBits(shardInfo.EcIndexBits),
+            ShardsInfo:  erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
             DiskType:    shardInfo.DiskType,
             DiskId:      shardInfo.DiskId,
             ExpireAtSec: shardInfo.ExpireAtSec,
-            ShardSizes:  shardInfo.ShardSizes,
         }
 
         newShards = append(newShards, ecVolumeInfo)
@@ -64,11 +62,10 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
         ecVolumeInfo := &erasure_coding.EcVolumeInfo{
             VolumeId:    needle.VolumeId(shardInfo.Id),
             Collection:  shardInfo.Collection,
-            ShardBits:   erasure_coding.ShardBits(shardInfo.EcIndexBits),
+            ShardsInfo:  erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
             DiskType:    shardInfo.DiskType,
             DiskId:      shardInfo.DiskId,
             ExpireAtSec: shardInfo.ExpireAtSec,
-            ShardSizes:  shardInfo.ShardSizes,
         }
 
         deletedShards = append(deletedShards, ecVolumeInfo)
@@ -124,31 +121,31 @@ func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *Dat
     return true
 }
 
-func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
+func (t *Topology) RegisterEcShards(ecvi *erasure_coding.EcVolumeInfo, dn *DataNode) {
 
     t.ecShardMapLock.Lock()
     defer t.ecShardMapLock.Unlock()
 
-    locations, found := t.ecShardMap[ecShardInfos.VolumeId]
+    locations, found := t.ecShardMap[ecvi.VolumeId]
     if !found {
-        locations = NewEcShardLocations(ecShardInfos.Collection)
-        t.ecShardMap[ecShardInfos.VolumeId] = locations
+        locations = NewEcShardLocations(ecvi.Collection)
+        t.ecShardMap[ecvi.VolumeId] = locations
     }
-    for _, shardId := range ecShardInfos.ShardIds() {
+    for _, shardId := range ecvi.ShardsInfo.Ids() {
         locations.AddShard(shardId, dn)
     }
 }
 
-func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) {
-    glog.Infof("removing ec shard info:%+v", ecShardInfos)
+func (t *Topology) UnRegisterEcShards(ecvi *erasure_coding.EcVolumeInfo, dn *DataNode) {
+    glog.Infof("removing ec shard info:%+v", ecvi)
     t.ecShardMapLock.Lock()
     defer t.ecShardMapLock.Unlock()
 
-    locations, found := t.ecShardMap[ecShardInfos.VolumeId]
+    locations, found := t.ecShardMap[ecvi.VolumeId]
     if !found {
         return
     }
-    for _, shardId := range ecShardInfos.ShardIds() {
+    for _, shardId := range ecvi.ShardsInfo.Ids() {
         locations.DeleteShard(shardId, dn)
     }
 }
```
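Judging from the hunks above, the fix consolidates the shard bitmap (`ShardBits`) and the per-shard size list (`ShardSizes`) into a single `ShardsInfo` value built by `ShardsInfoFromVolumeEcShardInformationMessage`, so shard presence and shard size are registered together in the topology. The sketch below only illustrates that consolidation; the real `erasure_coding.ShardsInfo` API is not part of this diff, so the map layout and `Ids` method are hypothetical:

```go
// Illustration of the consolidation idea only: the real erasure_coding.ShardsInfo
// API is not shown in this diff, so the map layout and method below are hypothetical.
package main

import (
    "fmt"
    "sort"
)

// shardsInfo keeps a shard's id and size in one place (shard id -> size in bytes).
type shardsInfo map[uint8]uint64

// Ids returns the shard ids in ascending order, mirroring how the topology code
// above iterates ecvi.ShardsInfo.Ids().
func (si shardsInfo) Ids() []uint8 {
    ids := make([]uint8, 0, len(si))
    for id := range si {
        ids = append(ids, id)
    }
    sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
    return ids
}

func main() {
    // Shards 3 and 7, 8 MiB each, as held by the last node in the listing above.
    si := shardsInfo{3: 8 << 20, 7: 8 << 20}
    fmt.Println(si.Ids()) // [3 7]
}
```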