seaweedFS/weed/topology/topology_ec.go
Lisandro Pin 6b98b52acc Fix reporting of EC shard sizes from nodes to masters. (#7835)
SeaweedFS tracks EC shard sizes in its topology data structures, but this information is never
relayed to master servers :( As a result, commands that report disk usage, such as
`volume.list` and `cluster.status`, yield incorrect figures when EC shards are present.

As an example for a simple 5-node test cluster, before...

```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:3  size:88967096  file_count:172  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[1 5]
        Disk hdd total size:88967096 file_count:172
      DataNode 192.168.10.111:9001 total size:88967096 file_count:172
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9002 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:77267536  file_count:166  replica_placement:2  version:3  modified_at_second:1766349617
          volume id:3  size:88967096  file_count:172  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[0 4]
        Disk hdd total size:166234632 file_count:338
      DataNode 192.168.10.111:9002 total size:166234632 file_count:338
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9003 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:2  size:77267536  file_count:166  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[2 6]
        Disk hdd total size:77267536 file_count:166
      DataNode 192.168.10.111:9003 total size:77267536 file_count:166
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:77267536  file_count:166  replica_placement:2  version:3  modified_at_second:1766349617
          volume id:3  size:88967096  file_count:172  replica_placement:2  version:3  modified_at_second:1766349617
          ec volume id:1 collection: shards:[3 7]
        Disk hdd total size:166234632 file_count:338
      DataNode 192.168.10.111:9004 total size:166234632 file_count:338
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
        Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
          ec volume id:1 collection: shards:[8 9 10 11 12 13]
        Disk hdd total size:0 file_count:0
    Rack DefaultRack total size:498703896 file_count:1014
  DataCenter DefaultDataCenter total size:498703896 file_count:1014
total size:498703896 file_count:1014
```

...and after:

```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:2  size:81761800  file_count:161  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[1 5 9] sizes:[1:8.00 MiB 5:8.00 MiB 9:8.00 MiB] total:24.00 MiB
        Disk hdd total size:81761800 file_count:161
      DataNode 192.168.10.111:9001 total size:81761800 file_count:161
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9002 hdd(volume:1/8 active:1 free:7 remote:0)
        Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
          volume id:3  size:88678712  file_count:170  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[11 12 13] sizes:[11:8.00 MiB 12:8.00 MiB 13:8.00 MiB] total:24.00 MiB
        Disk hdd total size:88678712 file_count:170
      DataNode 192.168.10.111:9002 total size:88678712 file_count:170
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9003 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:81761800  file_count:161  replica_placement:2  version:3  modified_at_second:1766349495
          volume id:3  size:88678712  file_count:170  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[0 4 8] sizes:[0:8.00 MiB 4:8.00 MiB 8:8.00 MiB] total:24.00 MiB
        Disk hdd total size:170440512 file_count:331
      DataNode 192.168.10.111:9003 total size:170440512 file_count:331
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
        Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
          volume id:2  size:81761800  file_count:161  replica_placement:2  version:3  modified_at_second:1766349495
          volume id:3  size:88678712  file_count:170  replica_placement:2  version:3  modified_at_second:1766349495
          ec volume id:1 collection: shards:[2 6 10] sizes:[2:8.00 MiB 6:8.00 MiB 10:8.00 MiB] total:24.00 MiB
        Disk hdd total size:170440512 file_count:331
      DataNode 192.168.10.111:9004 total size:170440512 file_count:331
  DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
    Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
      DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
        Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
          ec volume id:1 collection: shards:[3 7] sizes:[3:8.00 MiB 7:8.00 MiB] total:16.00 MiB
        Disk hdd total size:0 file_count:0
    Rack DefaultRack total size:511321536 file_count:993
  DataCenter DefaultDataCenter total size:511321536 file_count:993
total size:511321536 file_count:993
```
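
The per-node `total:` figure in the new output is simply the sum of the per-shard sizes reported by that node. A minimal, self-contained sketch of that aggregation (the `map[int]uint64` shape and the `ecShardsTotal` name are illustrative only; the real shard-size accessor on `ShardsInfo` may differ):

```go
package main

import "fmt"

// ecShardsTotal sums per-shard sizes into the per-node total that
// `volume.list` prints. The map shape is illustrative only.
func ecShardsTotal(shardSizes map[int]uint64) uint64 {
	var total uint64
	for _, size := range shardSizes {
		total += size
	}
	return total
}

func main() {
	// Shards 1, 5 and 9 at 8 MiB each, matching DataNode :9001 above.
	sizes := map[int]uint64{1: 8 << 20, 5: 8 << 20, 9: 8 << 20}
	fmt.Printf("total: %d MiB\n", ecShardsTotal(sizes)>>20) // total: 24 MiB
}
```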
2025-12-28 19:30:42 -08:00


package topology

import (
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)

type EcShardLocations struct {
	Collection string
	// Use MaxShardCount (32) to support custom EC ratios
	Locations [erasure_coding.MaxShardCount][]*DataNode
}

func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) (newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
	// convert into in memory struct storage.VolumeInfo
	var shards []*erasure_coding.EcVolumeInfo
	for _, shardInfo := range shardInfos {
		// Create EcVolumeInfo directly with optimized format
		ecVolumeInfo := &erasure_coding.EcVolumeInfo{
			VolumeId:    needle.VolumeId(shardInfo.Id),
			Collection:  shardInfo.Collection,
			ShardsInfo:  erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
			DiskType:    shardInfo.DiskType,
			DiskId:      shardInfo.DiskId,
			ExpireAtSec: shardInfo.ExpireAtSec,
		}
		shards = append(shards, ecVolumeInfo)
	}
	// find out the delta volumes
	newShards, deletedShards = dn.UpdateEcShards(shards)
	for _, v := range newShards {
		t.RegisterEcShards(v, dn)
	}
	for _, v := range deletedShards {
		t.UnRegisterEcShards(v, dn)
	}
	return
}

func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards []*master_pb.VolumeEcShardInformationMessage, dn *DataNode) {
	// convert into in memory struct storage.VolumeInfo
	var newShards, deletedShards []*erasure_coding.EcVolumeInfo
	for _, shardInfo := range newEcShards {
		// Create EcVolumeInfo directly with optimized format
		ecVolumeInfo := &erasure_coding.EcVolumeInfo{
			VolumeId:    needle.VolumeId(shardInfo.Id),
			Collection:  shardInfo.Collection,
			ShardsInfo:  erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
			DiskType:    shardInfo.DiskType,
			DiskId:      shardInfo.DiskId,
			ExpireAtSec: shardInfo.ExpireAtSec,
		}
		newShards = append(newShards, ecVolumeInfo)
	}
	for _, shardInfo := range deletedEcShards {
		// Create EcVolumeInfo directly with optimized format
		ecVolumeInfo := &erasure_coding.EcVolumeInfo{
			VolumeId:    needle.VolumeId(shardInfo.Id),
			Collection:  shardInfo.Collection,
			ShardsInfo:  erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo),
			DiskType:    shardInfo.DiskType,
			DiskId:      shardInfo.DiskId,
			ExpireAtSec: shardInfo.ExpireAtSec,
		}
		deletedShards = append(deletedShards, ecVolumeInfo)
	}
	dn.DeltaUpdateEcShards(newShards, deletedShards)
	for _, v := range newShards {
		t.RegisterEcShards(v, dn)
	}
	for _, v := range deletedShards {
		t.UnRegisterEcShards(v, dn)
	}
}

func NewEcShardLocations(collection string) *EcShardLocations {
	return &EcShardLocations{
		Collection: collection,
	}
}

func (loc *EcShardLocations) AddShard(shardId erasure_coding.ShardId, dn *DataNode) (added bool) {
	// Defensive bounds check to prevent panic with out-of-range shard IDs
	if int(shardId) >= erasure_coding.MaxShardCount {
		return false
	}
	dataNodes := loc.Locations[shardId]
	for _, n := range dataNodes {
		if n.Id() == dn.Id() {
			return false
		}
	}
	loc.Locations[shardId] = append(dataNodes, dn)
	return true
}

func (loc *EcShardLocations) DeleteShard(shardId erasure_coding.ShardId, dn *DataNode) (deleted bool) {
	// Defensive bounds check to prevent panic with out-of-range shard IDs
	if int(shardId) >= erasure_coding.MaxShardCount {
		return false
	}
	dataNodes := loc.Locations[shardId]
	foundIndex := -1
	for index, n := range dataNodes {
		if n.Id() == dn.Id() {
			foundIndex = index
		}
	}
	if foundIndex < 0 {
		return false
	}
	loc.Locations[shardId] = append(dataNodes[:foundIndex], dataNodes[foundIndex+1:]...)
	return true
}

func (t *Topology) RegisterEcShards(ecvi *erasure_coding.EcVolumeInfo, dn *DataNode) {
	t.ecShardMapLock.Lock()
	defer t.ecShardMapLock.Unlock()

	locations, found := t.ecShardMap[ecvi.VolumeId]
	if !found {
		locations = NewEcShardLocations(ecvi.Collection)
		t.ecShardMap[ecvi.VolumeId] = locations
	}
	for _, shardId := range ecvi.ShardsInfo.Ids() {
		locations.AddShard(shardId, dn)
	}
}

func (t *Topology) UnRegisterEcShards(ecvi *erasure_coding.EcVolumeInfo, dn *DataNode) {
	glog.Infof("removing ec shard info:%+v", ecvi)
	t.ecShardMapLock.Lock()
	defer t.ecShardMapLock.Unlock()

	locations, found := t.ecShardMap[ecvi.VolumeId]
	if !found {
		return
	}
	for _, shardId := range ecvi.ShardsInfo.Ids() {
		locations.DeleteShard(shardId, dn)
	}
}

func (t *Topology) LookupEcShards(vid needle.VolumeId) (locations *EcShardLocations, found bool) {
	t.ecShardMapLock.RLock()
	defer t.ecShardMapLock.RUnlock()

	locations, found = t.ecShardMap[vid]
	return
}

func (t *Topology) ListEcServersByCollection(collection string) (dataNodes []pb.ServerAddress) {
	t.ecShardMapLock.RLock()
	defer t.ecShardMapLock.RUnlock()

	dateNodeMap := make(map[pb.ServerAddress]bool)
	for _, ecVolumeLocation := range t.ecShardMap {
		if ecVolumeLocation.Collection == collection {
			for _, locations := range ecVolumeLocation.Locations {
				for _, loc := range locations {
					dateNodeMap[loc.ServerAddress()] = true
				}
			}
		}
	}
	for k := range dateNodeMap {
		dataNodes = append(dataNodes, k)
	}
	return
}

func (t *Topology) DeleteEcCollection(collection string) {
	t.ecShardMapLock.Lock()
	defer t.ecShardMapLock.Unlock()

	var vids []needle.VolumeId
	for vid, ecVolumeLocation := range t.ecShardMap {
		if ecVolumeLocation.Collection == collection {
			vids = append(vids, vid)
		}
	}
	for _, vid := range vids {
		delete(t.ecShardMap, vid)
	}
}
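
For context, the delta logic in `SyncDataNodeEcShards` above delegates to `dn.UpdateEcShards` to diff a node's freshly reported shards against what the topology previously knew, then registers the additions and unregisters the removals. A self-contained toy sketch of that diffing idea, using plain shard-id slices rather than the actual `EcVolumeInfo`/`ShardsInfo` types:

```go
package main

import "fmt"

// diffShards returns the shard ids present in current but not previous
// (newly reported) and those present in previous but not current (gone).
// It mirrors the new/deleted split used by SyncDataNodeEcShards, on plain
// ints instead of the real EcVolumeInfo structures.
func diffShards(previous, current []int) (added, removed []int) {
	prev := make(map[int]bool, len(previous))
	for _, id := range previous {
		prev[id] = true
	}
	cur := make(map[int]bool, len(current))
	for _, id := range current {
		cur[id] = true
		if !prev[id] {
			added = append(added, id)
		}
	}
	for _, id := range previous {
		if !cur[id] {
			removed = append(removed, id)
		}
	}
	return added, removed
}

func main() {
	added, removed := diffShards([]int{1, 5}, []int{1, 5, 9})
	fmt.Println(added, removed) // [9] []
}
```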