Fix reporting of EC shard sizes from nodes to masters. (#7835)
SeaweedFS tracks EC shard sizes in its topology data structures, but this information is never
relayed to master servers. As a result, commands that report disk usage, such as
`volume.list` and `cluster.status`, yield incorrect figures when EC shards are present.
As an example, for a simple 5-node test cluster, before the fix...
```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[1 5]
Disk hdd total size:88967096 file_count:172
DataNode 192.168.10.111:9001 total size:88967096 file_count:172
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9002 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[0 4]
Disk hdd total size:166234632 file_count:338
DataNode 192.168.10.111:9002 total size:166234632 file_count:338
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9003 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[2 6]
Disk hdd total size:77267536 file_count:166
DataNode 192.168.10.111:9003 total size:77267536 file_count:166
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[3 7]
Disk hdd total size:166234632 file_count:338
DataNode 192.168.10.111:9004 total size:166234632 file_count:338
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
ec volume id:1 collection: shards:[8 9 10 11 12 13]
Disk hdd total size:0 file_count:0
Rack DefaultRack total size:498703896 file_count:1014
DataCenter DefaultDataCenter total size:498703896 file_count:1014
total size:498703896 file_count:1014
```
...and after:
```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[1 5 9] sizes:[1:8.00 MiB 5:8.00 MiB 9:8.00 MiB] total:24.00 MiB
Disk hdd total size:81761800 file_count:161
DataNode 192.168.10.111:9001 total size:81761800 file_count:161
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9002 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[11 12 13] sizes:[11:8.00 MiB 12:8.00 MiB 13:8.00 MiB] total:24.00 MiB
Disk hdd total size:88678712 file_count:170
DataNode 192.168.10.111:9002 total size:88678712 file_count:170
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9003 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[0 4 8] sizes:[0:8.00 MiB 4:8.00 MiB 8:8.00 MiB] total:24.00 MiB
Disk hdd total size:170440512 file_count:331
DataNode 192.168.10.111:9003 total size:170440512 file_count:331
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[2 6 10] sizes:[2:8.00 MiB 6:8.00 MiB 10:8.00 MiB] total:24.00 MiB
Disk hdd total size:170440512 file_count:331
DataNode 192.168.10.111:9004 total size:170440512 file_count:331
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
ec volume id:1 collection: shards:[3 7] sizes:[3:8.00 MiB 7:8.00 MiB] total:16.00 MiB
Disk hdd total size:0 file_count:0
Rack DefaultRack total size:511321536 file_count:993
DataCenter DefaultDataCenter total size:511321536 file_count:993
total size:511321536 file_count:993
```
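
The gist of the change is that volume servers report the size of each EC shard they hold, and the master folds those sizes into the per-disk and per-node totals it already keeps for regular volumes. Below is a minimal, self-contained sketch of that accounting; the struct and field names are illustrative stand-ins, not the actual SeaweedFS heartbeat or topology types.

```go
package main

import "fmt"

// ecVolumeReport is a hypothetical stand-in for the per-EC-volume entry a
// volume server sends to the master: which shards it holds and how big each one is.
type ecVolumeReport struct {
	VolumeID   uint32
	ShardIDs   []int   // shard indexes held by this node
	ShardSizes []int64 // bytes per shard, parallel to ShardIDs
}

// ecUsage sums the reported shard sizes so the master can fold them into the
// disk-usage figures shown by commands like `volume.list`.
func ecUsage(reports []ecVolumeReport) int64 {
	var total int64
	for _, r := range reports {
		for _, size := range r.ShardSizes {
			total += size
		}
	}
	return total
}

func main() {
	// Mirrors node 192.168.10.111:9001 in the "after" listing: shards 1, 5 and 9 at 8 MiB each.
	reports := []ecVolumeReport{
		{VolumeID: 1, ShardIDs: []int{1, 5, 9}, ShardSizes: []int64{8 << 20, 8 << 20, 8 << 20}},
	}
	total := ecUsage(reports)
	fmt.Printf("EC bytes on node: %d (%.2f MiB)\n", total, float64(total)/(1<<20))
}
```

Running the sketch prints 25165824 bytes (24.00 MiB), matching the `total:24.00 MiB` shown for that node above.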
The accompanying test changes switch the shard ID arguments from `[]uint32` to the typed `[]erasure_coding.ShardId`:

```diff
@@ -15,9 +15,9 @@ func TestEcShardMapRegister(t *testing.T) {

 	// Create test nodes with EC shards
 	node1 := newEcNode("dc1", "rack1", "node1", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 1, 2, 3, 4, 5, 6})
 	node2 := newEcNode("dc1", "rack1", "node2", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{7, 8, 9, 10, 11, 12, 13})

 	ecShardMap.registerEcNode(node1, "c1")
 	ecShardMap.registerEcNode(node2, "c1")
@@ -51,15 +51,15 @@ func TestEcShardMapRegister(t *testing.T) {
 func TestEcShardMapShardCount(t *testing.T) {
 	testCases := []struct {
 		name          string
-		shardIds      []uint32
+		shardIds      []erasure_coding.ShardId
 		expectedCount int
 	}{
-		{"all shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 14},
-		{"data shards only", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 10},
-		{"parity shards only", []uint32{10, 11, 12, 13}, 4},
-		{"missing some shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8}, 9},
-		{"single shard", []uint32{0}, 1},
-		{"no shards", []uint32{}, 0},
+		{"all shards", []erasure_coding.ShardId{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 14},
+		{"data shards only", []erasure_coding.ShardId{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 10},
+		{"parity shards only", []erasure_coding.ShardId{10, 11, 12, 13}, 4},
+		{"missing some shards", []erasure_coding.ShardId{0, 1, 2, 3, 4, 5, 6, 7, 8}, 9},
+		{"single shard", []erasure_coding.ShardId{0}, 1},
+		{"no shards", []erasure_coding.ShardId{}, 0},
 	}

 	for _, tc := range testCases {
@@ -85,7 +85,7 @@ func TestRebuildEcVolumesInsufficientShards(t *testing.T) {

 	// Create a volume with insufficient shards (less than DataShardsCount)
 	node1 := newEcNode("dc1", "rack1", "node1", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4}) // Only 5 shards
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 1, 2, 3, 4}) // Only 5 shards

 	erb := &ecRebuilder{
 		commandEnv: &CommandEnv{
@@ -114,7 +114,7 @@ func TestRebuildEcVolumesCompleteVolume(t *testing.T) {

 	// Create a volume with all shards
 	node1 := newEcNode("dc1", "rack1", "node1", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})

 	erb := &ecRebuilder{
 		commandEnv: &CommandEnv{
@@ -146,7 +146,7 @@ func TestRebuildEcVolumesInsufficientSpace(t *testing.T) {
 	// Node has 10 local shards, missing 4 shards (10,11,12,13), so needs 4 free slots
 	// Set free slots to 3 (insufficient)
 	node1 := newEcNode("dc1", "rack1", "node1", 3). // Only 3 free slots, need 4
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})

 	erb := &ecRebuilder{
 		commandEnv: &CommandEnv{
@@ -180,11 +180,11 @@ func TestMultipleNodesWithShards(t *testing.T) {

 	// Create 3 nodes with different shards
 	node1 := newEcNode("dc1", "rack1", "node1", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 1, 2, 3})
 	node2 := newEcNode("dc1", "rack1", "node2", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{4, 5, 6, 7})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{4, 5, 6, 7})
 	node3 := newEcNode("dc1", "rack1", "node3", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{8, 9})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{8, 9})

 	ecShardMap.registerEcNode(node1, "c1")
 	ecShardMap.registerEcNode(node2, "c1")
@@ -222,9 +222,9 @@ func TestDuplicateShards(t *testing.T) {

 	// Create 2 nodes with overlapping shards (both have shard 0)
 	node1 := newEcNode("dc1", "rack1", "node1", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 1, 2, 3})
 	node2 := newEcNode("dc1", "rack1", "node2", 100).
-		addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 4, 5, 6}) // Duplicate shard 0
+		addEcVolumeAndShardsForTest(1, "c1", []erasure_coding.ShardId{0, 4, 5, 6}) // Duplicate shard 0

 	ecShardMap.registerEcNode(node1, "c1")
 	ecShardMap.registerEcNode(node2, "c1")
```
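
Aside from the size reporting itself, these test changes are type-only: shard IDs are now passed as `erasure_coding.ShardId` values rather than raw `uint32`s, so the test helpers use the same shard ID type as the erasure coding package. A small sketch of the pattern; the underlying integer type and the helper name below are assumptions for illustration.

```go
package main

import "fmt"

// Illustrative only: a named shard ID type (underlying type assumed here)
// keeps shard IDs from being mixed up with other integer arguments.
type ShardId uint8

// addShardsForTest is a hypothetical helper in the spirit of
// addEcVolumeAndShardsForTest; it just counts shards to stay self-contained.
func addShardsForTest(volumeID uint32, collection string, shardIds []ShardId) int {
	return len(shardIds)
}

func main() {
	// Typed literals, as in the updated tests.
	fmt.Println(addShardsForTest(1, "c1", []ShardId{0, 1, 2, 3, 4, 5, 6})) // 7
}
```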