optimize memory usage for a large number of volumes

1. unwrap the map to avoid extra map object creation
2. fix ec shard counting in UpdateEcShards
This commit is contained in:
chrislu
2024-10-10 10:00:30 -07:00
parent b28b1a3402
commit 35fd1e1c9a
7 changed files with 50 additions and 49 deletions

View File

@@ -29,10 +29,12 @@ func (d *Disk) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
delta = existing.ShardBits.ShardIdCount() - oldCount
}
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
deltaDiskUsage.ecShardCount = int64(delta)
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
if delta == 0 {
return
}
d.UpAdjustDiskUsageDelta(types.ToDiskType(string(d.Id())), &DiskUsageCounts{
ecShardCount: int64(delta),
})
}
@@ -45,10 +47,11 @@ func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
delta := existing.ShardBits.ShardIdCount() - oldCount
deltaDiskUsages := newDiskUsages()
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
deltaDiskUsage.ecShardCount = int64(delta)
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
if delta != 0 {
d.UpAdjustDiskUsageDelta(types.ToDiskType(string(d.Id())), &DiskUsageCounts{
ecShardCount: int64(delta),
})
}
if existing.ShardBits.ShardIdCount() == 0 {
delete(d.ecShards, s.VolumeId)