Fix concurrent map access in EC shards info (#8222)

* fix concurrent map access in EC shards info #8219

* refactor: simplify Disk.ToDiskInfo to use ecShards snapshot and avoid redundant locking

* refactor: improve GetEcShards with pre-allocation and defer
commit 82d9d8687b
parent e39a4c2041
Author: Chris Lu
Date: 2026-02-05 10:24:18 -08:00 (committed by GitHub)
3 changed files with 91 additions and 2 deletions
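
The pattern behind both refactors is the same: take the read lock, build a snapshot, and hand the snapshot to the caller instead of the live map. Below is a minimal, self-contained sketch of that pattern, assuming the real Disk guards its EC shard map with a sync.RWMutex; disk and ecVolumeInfo here are illustrative stand-ins, not the actual topology.Disk and erasure_coding.EcVolumeInfo definitions.

package main

import (
	"fmt"
	"sync"
)

// ecVolumeInfo is a simplified stand-in for erasure_coding.EcVolumeInfo.
type ecVolumeInfo struct {
	VolumeId uint32
}

// disk is a simplified stand-in for topology.Disk: an EC shard map
// guarded by an embedded RWMutex.
type disk struct {
	sync.RWMutex
	ecShards map[uint32]*ecVolumeInfo
}

// getEcShards builds the snapshot under the read lock: the slice is
// pre-allocated to the map size so append never reallocates, the unlock
// is deferred so every return path releases the lock, and callers get a
// copy they can iterate without racing concurrent writers to the map.
func (d *disk) getEcShards() []*ecVolumeInfo {
	d.RLock()
	defer d.RUnlock()

	shards := make([]*ecVolumeInfo, 0, len(d.ecShards))
	for _, v := range d.ecShards {
		shards = append(shards, v)
	}
	return shards
}

func main() {
	d := &disk{ecShards: map[uint32]*ecVolumeInfo{7: {VolumeId: 7}}}
	fmt.Println(len(d.getEcShards())) // 1
}

Returning a snapshot trades one allocation per call for the guarantee that no caller ever touches the map outside the lock, which is what the race fixed here was about.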


@@ -8,6 +8,8 @@ import (
 	"time"
 
 	"github.com/seaweedfs/seaweedfs/weed/sequence"
+	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
+	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 )
@@ -304,3 +306,45 @@ func TestReservationSystemPerformance(t *testing.T) {
 		t.Logf("Performance test passed: %v per reservation", avgDuration)
 	}
 }
+
+func TestDisk_GetEcShards_Race(t *testing.T) {
+	d := NewDisk("hdd")
+
+	// Pre-populate with one shard
+	initialShard := &erasure_coding.EcVolumeInfo{
+		VolumeId:   needle.VolumeId(1),
+		ShardsInfo: erasure_coding.NewShardsInfo(),
+	}
+	initialShard.ShardsInfo.Set(erasure_coding.ShardInfo{Id: 0, Size: 100})
+	d.AddOrUpdateEcShard(initialShard)
+
+	var wg sync.WaitGroup
+	wg.Add(10)
+
+	// Goroutines 1-5: continuously read shards
+	for j := 0; j < 5; j++ {
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 10000; i++ {
+				d.GetEcShards()
+			}
+		}()
+	}
+
+	// Goroutines 6-10: continuously update shards
+	for j := 0; j < 5; j++ {
+		go func() {
+			defer wg.Done()
+			for i := 0; i < 10000; i++ {
+				shard := &erasure_coding.EcVolumeInfo{
+					VolumeId:   needle.VolumeId(i % 100),
+					ShardsInfo: erasure_coding.NewShardsInfo(),
+				}
+				shard.ShardsInfo.Set(erasure_coding.ShardInfo{Id: erasure_coding.ShardId(i % 14), Size: 100})
+				d.AddOrUpdateEcShard(shard)
+			}
+		}()
+	}
+
+	wg.Wait()
+}
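
A test like this only proves anything under the race detector. Assuming the test lives in the weed/topology package (the path here is an assumption), a run such as go test -race -run TestDisk_GetEcShards_Race ./weed/topology/ should report a data race on the pre-fix code, where the reader goroutines iterate the live shard map while AddOrUpdateEcShard mutates it, and pass cleanly once GetEcShards returns a locked snapshot.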