SeaweedFS tracks EC shard sizes on topology data structures, but this information is never
relayed to master servers. The end result is that commands reporting disk usage, such
as `volume.list` and `cluster.status`, yield incorrect figures when EC shards are present.
As an example for a simple 5-node test cluster, before...
```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[1 5]
Disk hdd total size:88967096 file_count:172
DataNode 192.168.10.111:9001 total size:88967096 file_count:172
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9002 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[0 4]
Disk hdd total size:166234632 file_count:338
DataNode 192.168.10.111:9002 total size:166234632 file_count:338
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9003 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[2 6]
Disk hdd total size:77267536 file_count:166
DataNode 192.168.10.111:9003 total size:77267536 file_count:166
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:77267536 file_count:166 replica_placement:2 version:3 modified_at_second:1766349617
volume id:3 size:88967096 file_count:172 replica_placement:2 version:3 modified_at_second:1766349617
ec volume id:1 collection: shards:[3 7]
Disk hdd total size:166234632 file_count:338
DataNode 192.168.10.111:9004 total size:166234632 file_count:338
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
ec volume id:1 collection: shards:[8 9 10 11 12 13]
Disk hdd total size:0 file_count:0
Rack DefaultRack total size:498703896 file_count:1014
DataCenter DefaultDataCenter total size:498703896 file_count:1014
total size:498703896 file_count:1014
```
...and after:
```
> volume.list
Topology volumeSizeLimit:30000 MB hdd(volume:6/40 active:6 free:33 remote:0)
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9001 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[1 5 9] sizes:[1:8.00 MiB 5:8.00 MiB 9:8.00 MiB] total:24.00 MiB
Disk hdd total size:81761800 file_count:161
DataNode 192.168.10.111:9001 total size:81761800 file_count:161
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9002 hdd(volume:1/8 active:1 free:7 remote:0)
Disk hdd(volume:1/8 active:1 free:7 remote:0) id:0
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[11 12 13] sizes:[11:8.00 MiB 12:8.00 MiB 13:8.00 MiB] total:24.00 MiB
Disk hdd total size:88678712 file_count:170
DataNode 192.168.10.111:9002 total size:88678712 file_count:170
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9003 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[0 4 8] sizes:[0:8.00 MiB 4:8.00 MiB 8:8.00 MiB] total:24.00 MiB
Disk hdd total size:170440512 file_count:331
DataNode 192.168.10.111:9003 total size:170440512 file_count:331
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9004 hdd(volume:2/8 active:2 free:6 remote:0)
Disk hdd(volume:2/8 active:2 free:6 remote:0) id:0
volume id:2 size:81761800 file_count:161 replica_placement:2 version:3 modified_at_second:1766349495
volume id:3 size:88678712 file_count:170 replica_placement:2 version:3 modified_at_second:1766349495
ec volume id:1 collection: shards:[2 6 10] sizes:[2:8.00 MiB 6:8.00 MiB 10:8.00 MiB] total:24.00 MiB
Disk hdd total size:170440512 file_count:331
DataNode 192.168.10.111:9004 total size:170440512 file_count:331
DataCenter DefaultDataCenter hdd(volume:6/40 active:6 free:33 remote:0)
Rack DefaultRack hdd(volume:6/40 active:6 free:33 remote:0)
DataNode 192.168.10.111:9005 hdd(volume:0/8 active:0 free:8 remote:0)
Disk hdd(volume:0/8 active:0 free:8 remote:0) id:0
ec volume id:1 collection: shards:[3 7] sizes:[3:8.00 MiB 7:8.00 MiB] total:16.00 MiB
Disk hdd total size:0 file_count:0
Rack DefaultRack total size:511321536 file_count:993
DataCenter DefaultDataCenter total size:511321536 file_count:993
total size:511321536 file_count:993
```
New unit tests (Go, 174 lines, 4.1 KiB):
package erasure_coding_test
|
|
|
|
import (
|
|
"reflect"
|
|
"testing"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
|
|
erasure_coding "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
|
|
)
|
|
|
|
func TestShardsInfoDeleteParityShards(t *testing.T) {
|
|
si := erasure_coding.NewShardsInfo()
|
|
for _, id := range erasure_coding.AllShardIds() {
|
|
si.Set(id, 123)
|
|
}
|
|
si.DeleteParityShards()
|
|
|
|
if got, want := si.String(), "0:123 B 1:123 B 2:123 B 3:123 B 4:123 B 5:123 B 6:123 B 7:123 B 8:123 B 9:123 B"; got != want {
|
|
t.Errorf("expected %q, got %q", want, got)
|
|
}
|
|
|
|
}
|
|
|
|
func TestShardsInfoAsSlice(t *testing.T) {
|
|
si := erasure_coding.NewShardsInfo()
|
|
si.Set(5, 555)
|
|
si.Set(2, 222)
|
|
si.Set(7, 777)
|
|
si.Set(1, 111)
|
|
|
|
want := []*erasure_coding.ShardInfo{
|
|
&erasure_coding.ShardInfo{Id: 1, Size: 111},
|
|
&erasure_coding.ShardInfo{Id: 2, Size: 222},
|
|
&erasure_coding.ShardInfo{Id: 5, Size: 555},
|
|
&erasure_coding.ShardInfo{Id: 7, Size: 777},
|
|
}
|
|
if got := si.AsSlice(); !reflect.DeepEqual(got, want) {
|
|
t.Errorf("expected %v, got %v", want, got)
|
|
}
|
|
}
|
|
|
|
func TestShardsInfoSerialize(t *testing.T) {
|
|
testCases := []struct {
|
|
name string
|
|
shardIds map[erasure_coding.ShardId]erasure_coding.ShardSize
|
|
wantBits uint32
|
|
wantSizes []erasure_coding.ShardSize
|
|
}{
|
|
{
|
|
name: "no bits",
|
|
shardIds: nil,
|
|
wantBits: 0b0,
|
|
wantSizes: []erasure_coding.ShardSize{},
|
|
},
|
|
{
|
|
name: "single shard, first",
|
|
shardIds: map[erasure_coding.ShardId]erasure_coding.ShardSize{
|
|
0: 2345,
|
|
},
|
|
wantBits: 0b1,
|
|
wantSizes: []erasure_coding.ShardSize{2345},
|
|
},
|
|
{
|
|
name: "single shard, 5th",
|
|
shardIds: map[erasure_coding.ShardId]erasure_coding.ShardSize{
|
|
4: 6789,
|
|
},
|
|
wantBits: 0b10000,
|
|
wantSizes: []erasure_coding.ShardSize{6789},
|
|
},
|
|
{
|
|
name: "multiple shards",
|
|
shardIds: map[erasure_coding.ShardId]erasure_coding.ShardSize{
|
|
8: 800,
|
|
0: 5,
|
|
3: 300,
|
|
1: 100,
|
|
},
|
|
wantBits: 0b100001011,
|
|
wantSizes: []erasure_coding.ShardSize{5, 100, 300, 800},
|
|
},
|
|
}
|
|
|
|
for _, tc := range testCases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
si := erasure_coding.NewShardsInfo()
|
|
for id, size := range tc.shardIds {
|
|
si.Set(id, size)
|
|
}
|
|
|
|
if got, want := si.Bitmap(), tc.wantBits; got != want {
|
|
t.Errorf("expected bits %v, got %v", want, got)
|
|
}
|
|
if got, want := si.Sizes(), tc.wantSizes; !reflect.DeepEqual(got, want) {
|
|
t.Errorf("expected sizes %v, got %v", want, got)
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
func TestShardsInfoFromVolumeEcShardInformationMessage(t *testing.T) {
|
|
testCases := []struct {
|
|
name string
|
|
ecvInfo *master_pb.VolumeEcShardInformationMessage
|
|
want string
|
|
}{
|
|
{
|
|
name: "no msg",
|
|
ecvInfo: nil,
|
|
want: "",
|
|
},
|
|
{
|
|
name: "no shards",
|
|
ecvInfo: &master_pb.VolumeEcShardInformationMessage{},
|
|
want: "",
|
|
},
|
|
{
|
|
name: "single shard",
|
|
ecvInfo: &master_pb.VolumeEcShardInformationMessage{
|
|
EcIndexBits: 0b100,
|
|
ShardSizes: []int64{333},
|
|
},
|
|
want: "2:333 B",
|
|
},
|
|
{
|
|
name: "multiple shards",
|
|
ecvInfo: &master_pb.VolumeEcShardInformationMessage{
|
|
EcIndexBits: 0b1101,
|
|
ShardSizes: []int64{111, 333, 444},
|
|
},
|
|
want: "0:111 B 2:333 B 3:444 B",
|
|
},
|
|
{
|
|
name: "multiple shards with missing sizes",
|
|
ecvInfo: &master_pb.VolumeEcShardInformationMessage{
|
|
EcIndexBits: 0b110110,
|
|
ShardSizes: []int64{111, 333, 444},
|
|
},
|
|
want: "1:111 B 2:333 B 4:444 B 5:0 B",
|
|
},
|
|
}
|
|
|
|
for _, tc := range testCases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
si := erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(tc.ecvInfo)
|
|
if got, want := si.String(), tc.want; got != want {
|
|
t.Errorf("expected %q, got %q", want, got)
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
func TestShardsInfoCombine(t *testing.T) {
|
|
a := erasure_coding.NewShardsInfo()
|
|
a.Set(1, 111)
|
|
a.Set(2, 222)
|
|
a.Set(3, 333)
|
|
a.Set(4, 444)
|
|
a.Set(5, 0)
|
|
|
|
b := erasure_coding.NewShardsInfo()
|
|
b.Set(1, 555)
|
|
b.Set(4, 666)
|
|
b.Set(5, 777)
|
|
b.Set(6, 888)
|
|
|
|
if got, want := a.Plus(b).String(), "1:555 B 2:222 B 3:333 B 4:666 B 5:777 B 6:888 B"; got != want {
|
|
t.Errorf("expected %q for plus, got %q", want, got)
|
|
}
|
|
if got, want := a.Minus(b).String(), "2:222 B 3:333 B"; got != want {
|
|
t.Errorf("expected %q for minus, got %q", want, got)
|
|
}
|
|
}
|