* opt: reduce ShardsInfo memory usage with bitmap and sorted slice
  - Replace map[ShardId]*ShardInfo with sorted []ShardInfo slice
  - Add ShardBits (uint32) bitmap for O(1) existence checks
  - Use binary search for O(log n) lookups by shard ID
  - Maintain sorted order for efficient iteration
  - Add comprehensive unit tests and benchmarks

  Memory savings:
  - Map overhead: ~48 bytes per entry eliminated
  - Pointers: 8 bytes per entry eliminated
  - Total: ~56 bytes per shard saved

  Performance improvements:
  - Has(): O(1) using bitmap
  - Size(): O(log n) using binary search (was O(1), acceptable tradeoff)
  - Count(): O(1) using popcount on bitmap
  - Iteration: faster due to cache locality

* refactor: add methods to ShardBits type
  - Add Has(), Set(), Clear(), and Count() methods to ShardBits
  - Simplify ShardsInfo methods by using ShardBits methods
  - Improves code readability and encapsulation

* opt: use ShardBits directly in ShardsCountFromVolumeEcShardInformationMessage

  Avoid creating a full ShardsInfo object just to count shards. Directly cast vi.EcIndexBits to ShardBits and use the Count() method.

* opt: use strings.Builder in ShardsInfo.String() for efficiency

* refactor: change AsSlice to return []ShardInfo (values instead of pointers)

  This completes the memory optimization by avoiding unnecessary pointer slices and potential allocations.

* refactor: rename ShardsCountFromVolumeEcShardInformationMessage to GetShardCount

* fix: prevent deadlock in Add and Subtract methods

  Copy shards data from 'other' before releasing its lock to avoid potential deadlock when a.Add(b) and b.Add(a) are called concurrently. The previous implementation held other's lock while calling si.Set/Delete, which acquires si's lock. This could deadlock if two goroutines tried to add/subtract each other concurrently.

* opt: avoid unnecessary locking in constructor functions

  ShardsInfoFromVolume and ShardsInfoFromVolumeEcShardInformationMessage now build the shards slice and bitmap directly without calling Set(), which acquires a lock on every call. Since the object is local and not yet shared, locking is unnecessary and adds overhead. This improves performance during object construction.

* fix: rename 'copy' variable to avoid shadowing built-in function

  The variable name 'copy' in TestShardsInfo_Copy shadowed the built-in copy() function, which is confusing and bad practice. Renamed to 'siCopy'.

* opt: use math/bits.OnesCount32 and reorganize types
  1. Replace manual popcount loop with math/bits.OnesCount32 for better performance and idiomatic Go code
  2. Move ShardSize type definition to ec_shards_info.go for better code organization, since it's primarily used there

* refactor: Set() now accepts ShardInfo for future extensibility

  Changed Set(id ShardId, size ShardSize) to Set(shard ShardInfo) to support future additions to ShardInfo without changing the API. This makes the code more extensible, as new fields can be added to ShardInfo (e.g., checksum, location) without breaking the Set API.

* refactor: move ShardInfo and ShardSize to separate file

  Created ec_shard_info.go to hold the basic shard types (ShardInfo and ShardSize) for better code organization and separation of concerns.

* refactor: add ShardInfo constructor and helper functions

  Added NewShardInfo() constructor and IsValid() method to better encapsulate ShardInfo creation and validation. Updated code to use the constructor for cleaner, more maintainable code.

* fix: update remaining Set() calls to use NewShardInfo constructor

  Fixed compilation errors in storage and shell packages where Set() calls were not updated to use the new NewShardInfo() constructor.

* fix: remove unreachable code in filer backup commands

  Removed unreachable return statements after infinite loops in filer_backup.go and filer_meta_backup.go to fix compilation errors.

* fix: rename 'new' variable to avoid shadowing built-in

  Renamed 'new' to 'result' in MinusParityShards, Plus, and Minus methods to avoid shadowing Go's built-in new() function.

* fix: update remaining test files to use NewShardInfo constructor

  Fixed Set() calls in command_volume_list_test.go and ec_rebalance_slots_test.go to use the NewShardInfo() constructor.
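The ec_shards_info.go file these commits touch is not shown on this page, so the following is a minimal, self-contained sketch (in a throwaway package) of the data structure they describe: a ShardBits (uint32) bitmap counted with math/bits.OnesCount32, a []ShardInfo slice kept sorted by shard id so lookups can use binary search, and the copy-before-mutating pattern from the Add/Subtract deadlock fix. The identifiers ShardBits, ShardInfo, NewShardInfo, Set, Has, Count, and Add come from the commit messages; the field names (lock, bitmap, shards), the method signatures, and the Size lookup are assumptions made purely for illustration.

// Sketch only: this is NOT the real ec_shards_info.go. Types are redeclared
// in a throwaway package so the example compiles on its own.
package ecsketch

import (
	"math/bits"
	"sort"
	"sync"
)

type ShardId uint8
type ShardSize int64

// ShardInfo is the per-shard record kept in the sorted slice.
type ShardInfo struct {
	Id   ShardId
	Size ShardSize
}

// NewShardInfo mirrors the constructor the later commits introduce.
func NewShardInfo(id ShardId, size ShardSize) ShardInfo {
	return ShardInfo{Id: id, Size: size}
}

// ShardBits is a uint32 bitmap: bit i set means shard i is present.
type ShardBits uint32

func (b ShardBits) Has(id ShardId) bool        { return b&(1<<uint(id)) != 0 }
func (b ShardBits) Set(id ShardId) ShardBits   { return b | (1 << uint(id)) }
func (b ShardBits) Clear(id ShardId) ShardBits { return b &^ (1 << uint(id)) }
func (b ShardBits) Count() int                 { return bits.OnesCount32(uint32(b)) }

// ShardsInfo replaces map[ShardId]*ShardInfo with a bitmap plus a slice kept
// sorted by Id: existence checks are O(1), size lookups O(log n), iteration
// walks a compact, cache-friendly slice.
type ShardsInfo struct {
	lock   sync.RWMutex
	bitmap ShardBits
	shards []ShardInfo // sorted by Id
}

func (si *ShardsInfo) Has(id ShardId) bool {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.bitmap.Has(id)
}

func (si *ShardsInfo) Count() int {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.bitmap.Count()
}

// Size rejects absent shards via the bitmap, then binary-searches the slice.
func (si *ShardsInfo) Size(id ShardId) (ShardSize, bool) {
	si.lock.RLock()
	defer si.lock.RUnlock()
	if !si.bitmap.Has(id) {
		return 0, false
	}
	i := sort.Search(len(si.shards), func(i int) bool { return si.shards[i].Id >= id })
	return si.shards[i].Size, true
}

// Set inserts or updates one shard while keeping the slice sorted.
func (si *ShardsInfo) Set(shard ShardInfo) {
	si.lock.Lock()
	defer si.lock.Unlock()
	i := sort.Search(len(si.shards), func(i int) bool { return si.shards[i].Id >= shard.Id })
	if i < len(si.shards) && si.shards[i].Id == shard.Id {
		si.shards[i] = shard
	} else {
		si.shards = append(si.shards, ShardInfo{})
		copy(si.shards[i+1:], si.shards[i:]) // shift right; copy handles overlap
		si.shards[i] = shard
	}
	si.bitmap = si.bitmap.Set(shard.Id)
}

// Add merges other into si. Per the deadlock fix above, other's shards are
// snapshotted under other's lock, that lock is released, and only then is si
// mutated (Set takes si's lock per call), so the two locks are never held at
// the same time and a.Add(b) racing b.Add(a) cannot deadlock.
func (si *ShardsInfo) Add(other *ShardsInfo) {
	other.lock.RLock()
	snapshot := make([]ShardInfo, len(other.shards))
	copy(snapshot, other.shards)
	other.lock.RUnlock()

	for _, s := range snapshot {
		si.Set(s)
	}
}

With this layout, Has and Count never touch the slice at all, and Size only touches it after the bitmap has confirmed the shard is present.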
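A few more of the commits fit the same sketch: GetShardCount (the renamed ShardsCountFromVolumeEcShardInformationMessage) casts the message's EcIndexBits field, which the file below really does populate, straight to ShardBits and takes the popcount; the constructors build the slice and bitmap without calling Set(), since the value is still local and unshared; and String() accumulates output through a strings.Builder. The snippet below continues the sketch above (its import block would also need "fmt", "strings", and the master_pb package); the ordering of ShardSizes relative to the set bits and the String() output format are assumptions.

// Continuation of the sketch above; field names, ShardSizes ordering, and
// the String() format are assumed, not taken from the real ec_shards_info.go.

// GetShardCount counts shards straight from the bitmap carried in the
// heartbeat message, without materializing a ShardsInfo.
func GetShardCount(vi *master_pb.VolumeEcShardInformationMessage) int {
	return ShardBits(vi.EcIndexBits).Count()
}

// ShardsInfoFromVolumeEcShardInformationMessage builds the slice and bitmap
// directly. The value is local and unshared, so no locking is needed here;
// assumes ShardSizes is ordered by ascending set bit in EcIndexBits.
func ShardsInfoFromVolumeEcShardInformationMessage(vi *master_pb.VolumeEcShardInformationMessage) *ShardsInfo {
	si := &ShardsInfo{bitmap: ShardBits(vi.EcIndexBits)}
	sizeIdx := 0
	for id := ShardId(0); id < 32; id++ {
		if !si.bitmap.Has(id) {
			continue
		}
		var size ShardSize
		if sizeIdx < len(vi.ShardSizes) {
			size = ShardSize(vi.ShardSizes[sizeIdx])
			sizeIdx++
		}
		si.shards = append(si.shards, NewShardInfo(id, size))
	}
	return si
}

// String builds its output with a strings.Builder, so appends are amortized
// instead of allocating a new string per concatenation.
func (si *ShardsInfo) String() string {
	si.lock.RLock()
	defer si.lock.RUnlock()

	var sb strings.Builder
	for _, s := range si.shards {
		fmt.Fprintf(&sb, "shard %d: %d bytes\n", s.Id, s.Size)
	}
	return sb.String()
}

The file that follows is the actual ec_volume.go from this change set.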
package erasure_coding

import (
	"errors"
	"fmt"
	"os"
	"slices"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
)

var (
	NotFoundError             = errors.New("needle not found")
	destroyDelaySeconds int64 = 0
)

type EcVolume struct {
	VolumeId                  needle.VolumeId
	Collection                string
	dir                       string
	dirIdx                    string
	ecxFile                   *os.File
	ecxFileSize               int64
	ecxCreatedAt              time.Time
	Shards                    []*EcVolumeShard
	ShardLocations            map[ShardId][]pb.ServerAddress
	ShardLocationsRefreshTime time.Time
	ShardLocationsLock        sync.RWMutex
	Version                   needle.Version
	ecjFile                   *os.File
	ecjFileAccessLock         sync.Mutex
	diskType                  types.DiskType
	datFileSize               int64
	ExpireAtSec               uint64     // ec volume destroy time, calculated from when the ec volume was created
	ECContext                 *ECContext // EC encoding parameters
}

func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
	ev = &EcVolume{dir: dir, dirIdx: dirIdx, Collection: collection, VolumeId: vid, diskType: diskType}

	dataBaseFileName := EcShardFileName(collection, dir, int(vid))
	indexBaseFileName := EcShardFileName(collection, dirIdx, int(vid))

	// open ecx file
	if ev.ecxFile, err = os.OpenFile(indexBaseFileName+".ecx", os.O_RDWR, 0644); err != nil {
		return nil, fmt.Errorf("cannot open ec volume index %s.ecx: %v", indexBaseFileName, err)
	}
	ecxFi, statErr := ev.ecxFile.Stat()
	if statErr != nil {
		_ = ev.ecxFile.Close()
		return nil, fmt.Errorf("cannot stat ec volume index %s.ecx: %v", indexBaseFileName, statErr)
	}
	ev.ecxFileSize = ecxFi.Size()
	ev.ecxCreatedAt = ecxFi.ModTime()

	// open ecj file
	if ev.ecjFile, err = os.OpenFile(indexBaseFileName+".ecj", os.O_RDWR|os.O_CREATE, 0644); err != nil {
		return nil, fmt.Errorf("cannot open ec volume journal %s.ecj: %v", indexBaseFileName, err)
	}

	// read volume info
	ev.Version = needle.Version3
	if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found {
		ev.Version = needle.Version(volumeInfo.Version)
		ev.datFileSize = volumeInfo.DatFileSize
		ev.ExpireAtSec = volumeInfo.ExpireAtSec

		// Initialize EC context from .vif if present; fall back to defaults
		if volumeInfo.EcShardConfig != nil {
			ds := int(volumeInfo.EcShardConfig.DataShards)
			ps := int(volumeInfo.EcShardConfig.ParityShards)

			// Validate shard counts to prevent zero or invalid values
			if ds <= 0 || ps <= 0 || ds+ps > MaxShardCount {
				glog.Warningf("Invalid EC config in VolumeInfo for volume %d (data=%d, parity=%d), using defaults", vid, ds, ps)
				ev.ECContext = NewDefaultECContext(collection, vid)
			} else {
				ev.ECContext = &ECContext{
					Collection:   collection,
					VolumeId:     vid,
					DataShards:   ds,
					ParityShards: ps,
				}
				glog.V(1).Infof("Loaded EC config from VolumeInfo for volume %d: %s", vid, ev.ECContext.String())
			}
		} else {
			ev.ECContext = NewDefaultECContext(collection, vid)
		}
	} else {
		glog.Warningf("vif file not found, volumeId: %d, filename: %s", vid, dataBaseFileName)
		volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
		ev.ECContext = NewDefaultECContext(collection, vid)
	}

	ev.ShardLocations = make(map[ShardId][]pb.ServerAddress)

	return
}

func (ev *EcVolume) AddEcVolumeShard(ecVolumeShard *EcVolumeShard) bool {
	for _, s := range ev.Shards {
		if s.ShardId == ecVolumeShard.ShardId {
			return false
		}
	}
	ev.Shards = append(ev.Shards, ecVolumeShard)
	slices.SortFunc(ev.Shards, func(a, b *EcVolumeShard) int {
		if a.VolumeId != b.VolumeId {
			return int(a.VolumeId - b.VolumeId)
		}
		return int(a.ShardId - b.ShardId)
	})
	return true
}

func (ev *EcVolume) DeleteEcVolumeShard(shardId ShardId) (ecVolumeShard *EcVolumeShard, deleted bool) {
	foundPosition := -1
	for i, s := range ev.Shards {
		if s.ShardId == shardId {
			foundPosition = i
		}
	}
	if foundPosition < 0 {
		return nil, false
	}

	ecVolumeShard = ev.Shards[foundPosition]
	ecVolumeShard.Unmount()
	ev.Shards = append(ev.Shards[:foundPosition], ev.Shards[foundPosition+1:]...)
	return ecVolumeShard, true
}

func (ev *EcVolume) FindEcVolumeShard(shardId ShardId) (ecVolumeShard *EcVolumeShard, found bool) {
	for _, s := range ev.Shards {
		if s.ShardId == shardId {
			return s, true
		}
	}
	return nil, false
}

func (ev *EcVolume) Close() {
	for _, s := range ev.Shards {
		s.Close()
	}
	if ev.ecjFile != nil {
		ev.ecjFileAccessLock.Lock()
		_ = ev.ecjFile.Close()
		ev.ecjFile = nil
		ev.ecjFileAccessLock.Unlock()
	}
	if ev.ecxFile != nil {
		_ = ev.ecxFile.Sync()
		_ = ev.ecxFile.Close()
		ev.ecxFile = nil
	}
}

// Sync flushes the .ecx and .ecj files to disk without closing them.
// This ensures that deletions made via DeleteNeedleFromEcx are visible
// to other processes/file handles that may read these files.
func (ev *EcVolume) Sync() {
	if ev.ecjFile != nil {
		ev.ecjFileAccessLock.Lock()
		if err := ev.ecjFile.Sync(); err != nil {
			glog.Warningf("failed to sync ecj file for volume %d: %v", ev.VolumeId, err)
		}
		ev.ecjFileAccessLock.Unlock()
	}
	if ev.ecxFile != nil {
		if err := ev.ecxFile.Sync(); err != nil {
			glog.Warningf("failed to sync ecx file for volume %d: %v", ev.VolumeId, err)
		}
	}
}

func (ev *EcVolume) Destroy() {
	ev.Close()

	for _, s := range ev.Shards {
		s.Destroy()
	}
	os.Remove(ev.FileName(".ecx"))
	os.Remove(ev.FileName(".ecj"))
	os.Remove(ev.FileName(".vif"))
}

func (ev *EcVolume) FileName(ext string) string {
	switch ext {
	case ".ecx", ".ecj":
		return ev.IndexBaseFileName() + ext
	}
	// .vif
	return ev.DataBaseFileName() + ext
}

func (ev *EcVolume) DataBaseFileName() string {
	return EcShardFileName(ev.Collection, ev.dir, int(ev.VolumeId))
}

func (ev *EcVolume) IndexBaseFileName() string {
	return EcShardFileName(ev.Collection, ev.dirIdx, int(ev.VolumeId))
}

func (ev *EcVolume) ShardSize() uint64 {
	if len(ev.Shards) > 0 {
		return uint64(ev.Shards[0].Size())
	}
	return 0
}

func (ev *EcVolume) Size() (size uint64) {
	for _, shard := range ev.Shards {
		if shardSize := shard.Size(); shardSize > 0 {
			size += uint64(shardSize)
		}
	}
	return
}

func (ev *EcVolume) CreatedAt() time.Time {
	return ev.ecxCreatedAt
}

func (ev *EcVolume) ShardIdList() (shardIds []ShardId) {
	for _, s := range ev.Shards {
		shardIds = append(shardIds, s.ShardId)
	}
	return
}

func (ev *EcVolume) ToVolumeEcShardInformationMessage(diskId uint32) (messages []*master_pb.VolumeEcShardInformationMessage) {
	ecInfoPerVolume := map[needle.VolumeId]*master_pb.VolumeEcShardInformationMessage{}

	for _, s := range ev.Shards {
		m, ok := ecInfoPerVolume[s.VolumeId]
		if !ok {
			m = &master_pb.VolumeEcShardInformationMessage{
				Id:          uint32(s.VolumeId),
				Collection:  s.Collection,
				DiskType:    string(ev.diskType),
				ExpireAtSec: ev.ExpireAtSec,
				DiskId:      diskId,
			}
			ecInfoPerVolume[s.VolumeId] = m
		}

		// Update EC shard bits and sizes.
		si := ShardsInfoFromVolumeEcShardInformationMessage(m)
		si.Set(NewShardInfo(s.ShardId, ShardSize(s.Size())))
		m.EcIndexBits = uint32(si.Bitmap())
		m.ShardSizes = si.SizesInt64()
	}

	for _, m := range ecInfoPerVolume {
		messages = append(messages, m)
	}
	return
}

func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.Version) (offset types.Offset, size types.Size, intervals []Interval, err error) {

	// find the needle from the ecx file
	offset, size, err = ev.FindNeedleFromEcx(needleId)
	if err != nil {
		return types.Offset{}, 0, nil, fmt.Errorf("FindNeedleFromEcx: %w", err)
	}

	intervals = ev.LocateEcShardNeedleInterval(version, offset.ToActualOffset(), types.Size(needle.GetActualSize(size, version)))
	return
}

func (ev *EcVolume) LocateEcShardNeedleInterval(version needle.Version, offset int64, size types.Size) (intervals []Interval) {
	shard := ev.Shards[0]
	// Usually a shard is padded to a multiple of ErasureCodingSmallBlockSize.
	// So in most cases, if shardSize equals n * ErasureCodingLargeBlockSize,
	// the data would be in small blocks.
	shardSize := shard.ecdFileSize - 1
	if ev.datFileSize > 0 {
		// To get the correct LargeBlockRowsCount,
		// use datFileSize to calculate the shardSize to match the EC encoding logic.
		shardSize = ev.datFileSize / int64(ev.ECContext.DataShards)
	}
	// calculate the locations in the ec shards
	intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, shardSize, offset, types.Size(needle.GetActualSize(size, version)))

	return
}

func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size types.Size, err error) {
	return SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, nil)
}

func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size types.Size, err error) {
	var key types.NeedleId
	buf := make([]byte, types.NeedleMapEntrySize)
	l, h := int64(0), ecxFileSize/types.NeedleMapEntrySize
	for l < h {
		m := (l + h) / 2
		if n, err := ecxFile.ReadAt(buf, m*types.NeedleMapEntrySize); err != nil {
			if n != types.NeedleMapEntrySize {
				return types.Offset{}, types.TombstoneFileSize, fmt.Errorf("ecx file %d read at %d: %v", ecxFileSize, m*types.NeedleMapEntrySize, err)
			}
		}
		key, offset, size = idx.IdxFileEntry(buf)
		if key == needleId {
			if processNeedleFn != nil {
				err = processNeedleFn(ecxFile, m*types.NeedleMapEntrySize)
			}
			return
		}
		if key < needleId {
			l = m + 1
		} else {
			h = m
		}
	}

	err = NotFoundError
	return
}

func (ev *EcVolume) IsTimeToDestroy() bool {
	return ev.ExpireAtSec > 0 && time.Now().Unix() > (int64(ev.ExpireAtSec)+destroyDelaySeconds)
}