Admin: misc improvements on admin server and workers. EC now works. (#7055)

* initial design

* added simulation as tests

* reorganized the codebase to move the simulation framework and tests into their own dedicated package

* integration test for the ec worker task

* remove "enhanced" reference

* start master, volume servers, filer

Current Status
 Master: Healthy and running (port 9333)
 Filer: Healthy and running (port 8888)
 Volume Servers: All 6 servers running (ports 8080-8085)
🔄 Admin/Workers: Will start when dependencies are ready

* generate write load

* tasks are assigned

* admin starts with grpc port. worker has its own working directory

* Update .gitignore

* working worker and admin. Task detection is not working yet.

* compiles, detection uses volumeSizeLimitMB from master
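
  As a rough sketch of what such a size-based check could look like (the function name and the fullness ratio are assumptions, not the actual detector code):

    package main

    import "fmt"

    // isEcCandidate is an illustrative check, not the actual detector: a volume
    // becomes an erasure-coding candidate once its size reaches a fraction of
    // the volume size limit reported by the master (volumeSizeLimitMB).
    func isEcCandidate(volumeSizeBytes, volumeSizeLimitMB uint64, fullness float64) bool {
        limitBytes := volumeSizeLimitMB * 1024 * 1024
        return float64(volumeSizeBytes) >= fullness*float64(limitBytes)
    }

    func main() {
        // A 29 GiB volume against a 30 GiB limit and a 0.95 fullness ratio qualifies.
        fmt.Println(isEcCandidate(29<<30, 30*1024, 0.95)) // true
    }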

* compiles

* worker retries connecting to admin
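
  The retry behaviour is essentially a dial loop with capped backoff; a minimal sketch under assumed names (not the actual worker code):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // connectWithRetry keeps calling dial until it succeeds or the context is
    // cancelled, doubling the wait up to a cap. Illustrative only.
    func connectWithRetry(ctx context.Context, dial func() error) error {
        backoff := time.Second
        for {
            if err := dial(); err == nil {
                return nil
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(backoff):
            }
            if backoff < 30*time.Second {
                backoff *= 2
            }
        }
    }

    func main() {
        attempts := 0
        err := connectWithRetry(context.Background(), func() error {
            attempts++
            if attempts < 3 {
                return errors.New("admin not ready")
            }
            return nil
        })
        fmt.Println(attempts, err) // 3 <nil>
    }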

* build and restart

* rendering pending tasks

* skip task ID column

* sticky worker id

* test canScheduleTaskNow

* worker reconnect to admin

* clean up logs

* worker registers itself first

* worker can run ec work and report status

but:
1. one volume should not be repeatedly worked on (see the sketch below).
2. ec shards need to be distributed and the source data should be deleted.
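
  One way point 1 could be enforced is an in-flight set on the admin side; this is only a sketch with made-up names, not the actual scheduler:

    package main

    import (
        "fmt"
        "sync"
    )

    // pendingVolumes tracks volumes that already have an in-flight EC task so
    // the scheduler does not hand the same volume out twice. Illustrative only.
    type pendingVolumes struct {
        mu       sync.Mutex
        inFlight map[uint32]bool
    }

    func newPendingVolumes() *pendingVolumes {
        return &pendingVolumes{inFlight: make(map[uint32]bool)}
    }

    // tryClaim reports whether the volume was free and is now claimed.
    func (p *pendingVolumes) tryClaim(volumeId uint32) bool {
        p.mu.Lock()
        defer p.mu.Unlock()
        if p.inFlight[volumeId] {
            return false
        }
        p.inFlight[volumeId] = true
        return true
    }

    // release frees the volume once its task completes or fails.
    func (p *pendingVolumes) release(volumeId uint32) {
        p.mu.Lock()
        defer p.mu.Unlock()
        delete(p.inFlight, volumeId)
    }

    func main() {
        p := newPendingVolumes()
        fmt.Println(p.tryClaim(42)) // true
        fmt.Println(p.tryClaim(42)) // false: already being worked on
        p.release(42)
        fmt.Println(p.tryClaim(42)) // true again
    }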

* move ec task logic

* listing ec shards

* local copy and ec encoding; still need to distribute the shards

* ec is mostly working now

* distribution of ec shards needs improvement
* need configuration to enable ec

* show ec volumes

* interval field UI component

* rename

* integration test with vacuuming

* garbage percentage threshold
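
  Roughly, the threshold check amounts to comparing a volume's deleted-byte ratio against the configured percentage; a sketch with assumed names, not the actual detector code:

    package main

    import "fmt"

    // isVacuumCandidate is an illustrative check: a volume qualifies for
    // vacuuming once its deleted bytes make up at least garbageThreshold of
    // its total size (e.g. 0.3 for 30%).
    func isVacuumCandidate(deletedBytes, totalBytes uint64, garbageThreshold float64) bool {
        if totalBytes == 0 {
            return false
        }
        return float64(deletedBytes)/float64(totalBytes) >= garbageThreshold
    }

    func main() {
        fmt.Println(isVacuumCandidate(400, 1000, 0.3)) // true: 40% garbage
        fmt.Println(isVacuumCandidate(100, 1000, 0.3)) // false: only 10%
    }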

* fix warning

* display ec shard sizes

* fix ec volumes list

* Update ui.go

* show default values

* ensure correct default value

* MaintenanceConfig use ConfigField

* use schema defined defaults

* config

* reduce duplication

* refactor to use BaseUIProvider

* each task registers its schema

* checkECEncodingCandidate use ecDetector

* use vacuumDetector

* use volumeSizeLimitMB

* remove


* remove unused

* refactor

* use new framework

* remove v2 reference

* refactor

* left menu can scroll now

* The maintenance manager was not being initialized when no data directory was configured for persistent storage.

* saving config

* Update task_config_schema_templ.go

* enable/disable tasks

* protobuf encoded task configurations

* fix system settings

* use ui component

* remove logs

* interface{} reduction

* reduce interface{}

* reduce interface{}

* avoid from/to map

* reduce interface{}

* refactor

* keep it DRY

* added logging

* debug messages

* debug level

* debug

* show the log caller line

* use configured task policy

* log level

* handle admin heartbeat response

* Update worker.go

* fix EC rack and dc count
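
  For reference, counting racks and data centers across planned shard destinations is a distinct-count over the placements; a sketch with made-up types, not the actual planner structures:

    package main

    import "fmt"

    // placement names where one EC shard is planned to go; illustrative types.
    type placement struct {
        DataCenter string
        Rack       string
    }

    // countDCsAndRacks reports how many distinct data centers and racks the
    // planned shard placements span. Racks are qualified by their data center
    // so same-named racks in different DCs are not conflated.
    func countDCsAndRacks(placements []placement) (dcCount, rackCount int) {
        dcs := map[string]struct{}{}
        racks := map[string]struct{}{}
        for _, p := range placements {
            dcs[p.DataCenter] = struct{}{}
            racks[p.DataCenter+"/"+p.Rack] = struct{}{}
        }
        return len(dcs), len(racks)
    }

    func main() {
        fmt.Println(countDCsAndRacks([]placement{
            {"dc1", "rack1"}, {"dc1", "rack2"}, {"dc2", "rack1"},
        })) // 2 3
    }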

* Report task status to admin server

* fix task logging, simplify interface checking, use erasure_coding constants

* factor in empty volume servers during task planning

* volume.list adds disk id

* track disk id also

* fix locking for scheduled and manual scanning

* add active topology

* simplify task detector

* ec task completed, but shards are not showing up

* implement ec in ec_typed.go

* adjust log level

* dedup

* implementing ec: copying shards and only the ecx files

* use disk id when distributing ec shards

🎯 Planning: ActiveTopology creates DestinationPlan with specific TargetDisk
📦 Task Creation: maintenance_integration.go creates ECDestination with DiskId
🚀 Task Execution: EC task passes DiskId in VolumeEcShardsCopyRequest
💾 Volume Server: Receives disk_id and stores shards on the specific disk (vs.store.Locations[req.DiskId]); see the sketch after this list
📂 File System: EC shards and metadata land in the exact disk directory planned
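
  A minimal sketch of the last two steps, with simplified local types rather than the actual SeaweedFS structures: the volume server indexes its configured disk directories by the disk_id carried in the request, so the shards land exactly where the planner intended.

    package main

    import "fmt"

    // DiskLocation and Store are simplified stand-ins for the volume server's
    // per-disk directories, indexed by disk ID.
    type DiskLocation struct{ Directory string }

    type Store struct{ Locations []*DiskLocation }

    // resolveTargetDisk picks the disk directory chosen by the planner via
    // disk_id, guarding against an out-of-range ID from a stale plan.
    func (s *Store) resolveTargetDisk(diskId uint32) (*DiskLocation, error) {
        if int(diskId) >= len(s.Locations) {
            return nil, fmt.Errorf("disk id %d out of range (%d disks)", diskId, len(s.Locations))
        }
        return s.Locations[diskId], nil
    }

    func main() {
        s := &Store{Locations: []*DiskLocation{{"/data/disk0"}, {"/data/disk1"}}}
        if loc, err := s.resolveTargetDisk(1); err == nil {
            fmt.Println(loc.Directory) // /data/disk1
        }
    }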

* Delete original volume from all locations

* clean up existing shard locations

* local encoding and distributing

* Update docker/admin_integration/EC-TESTING-README.md

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* check volume id range

* simplify

* fix tests

* fix types

* clean up logs and tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author: Chris Lu
Date: 2025-07-30 12:38:03 -07:00
Committed by: GitHub
Parent: 64198dad83
Commit: 891a2fb6eb
130 changed files with 27737 additions and 4429 deletions


@@ -91,11 +91,12 @@ func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int,
 		s.Locations = append(s.Locations, location)
 		stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))
 
+		diskId := uint32(i) // Track disk ID
 		wg.Add(1)
-		go func() {
+		go func(id uint32, diskLoc *DiskLocation) {
 			defer wg.Done()
-			location.loadExistingVolumes(needleMapKind, ldbTimeout)
-		}()
+			diskLoc.loadExistingVolumesWithId(needleMapKind, ldbTimeout, id)
+		}(diskId, location)
 	}
 	wg.Wait()
@@ -163,14 +164,25 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
 	if s.findVolume(vid) != nil {
 		return fmt.Errorf("Volume Id %d already exists!", vid)
 	}
-	if location := s.FindFreeLocation(func(location *DiskLocation) bool {
-		return location.DiskType == diskType
-	}); location != nil {
-		glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
-			location.Directory, vid, collection, replicaPlacement, ttl)
+	// Find location and its index
+	var location *DiskLocation
+	var diskId uint32
+	for i, loc := range s.Locations {
+		if loc.DiskType == diskType && s.hasFreeDiskLocation(loc) {
+			location = loc
+			diskId = uint32(i)
+			break
+		}
+	}
+	if location != nil {
+		glog.V(0).Infof("In dir %s (disk ID %d) adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
+			location.Directory, diskId, vid, collection, replicaPlacement, ttl)
 		if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, ver, memoryMapMaxSizeMb, ldbTimeout); err == nil {
+			volume.diskId = diskId // Set the disk ID
 			location.SetVolume(vid, volume)
-			glog.V(0).Infof("add volume %d", vid)
+			glog.V(0).Infof("add volume %d on disk ID %d", vid, diskId)
 			s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
 				Id:         uint32(vid),
 				Collection: collection,
@@ -178,6 +190,7 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
 				Version:    uint32(volume.Version()),
 				Ttl:        ttl.ToUint32(),
 				DiskType:   string(diskType),
+				DiskId:     diskId,
 			}
 			return nil
 		} else {
@@ -187,6 +200,11 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind
 	return fmt.Errorf("No more free space left")
 }
 
+// hasFreeDiskLocation checks if a disk location has free space
+func (s *Store) hasFreeDiskLocation(location *DiskLocation) bool {
+	return int64(location.VolumesLen()) < int64(location.MaxVolumeCount)
+}
+
 func (s *Store) VolumeInfos() (allStats []*VolumeInfo) {
 	for _, location := range s.Locations {
 		stats := collectStatsForOneLocation(location)
@@ -218,21 +236,10 @@ func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
 		Ttl:             v.Ttl,
 		CompactRevision: uint32(v.CompactionRevision),
 		DiskType:        v.DiskType().String(),
+		DiskId:          v.diskId,
 	}
 	s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()
 
-	v.dataFileAccessLock.RLock()
-	defer v.dataFileAccessLock.RUnlock()
-
-	if v.nm == nil {
-		return
-	}
-
-	s.FileCount = v.nm.FileCount()
-	s.DeleteCount = v.nm.DeletedCount()
-	s.DeletedByteCount = v.nm.DeletedSize()
-	s.Size = v.nm.ContentSize()
+	s.Size, _, _ = v.FileStat()
 
 	return
 }
@@ -384,7 +391,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 }
 
 func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeEcShardInformationMessage) {
-	for _, location := range s.Locations {
+	for diskId, location := range s.Locations {
 		// Collect ecVolume to be deleted
 		var toDeleteEvs []*erasure_coding.EcVolume
 		location.ecVolumesLock.RLock()
@@ -392,7 +399,7 @@ func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeE
 			if ev.IsTimeToDestroy() {
 				toDeleteEvs = append(toDeleteEvs, ev)
 			} else {
-				messages := ev.ToVolumeEcShardInformationMessage()
+				messages := ev.ToVolumeEcShardInformationMessage(uint32(diskId))
 				ecShards = append(ecShards, messages...)
 			}
 		}
@@ -400,7 +407,7 @@ func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeE
 
 		// Delete expired volumes
 		for _, ev := range toDeleteEvs {
-			messages := ev.ToVolumeEcShardInformationMessage()
+			messages := ev.ToVolumeEcShardInformationMessage(uint32(diskId))
 			// deleteEcVolumeById has its own lock
 			err := location.deleteEcVolumeById(ev.VolumeId)
 			if err != nil {
@@ -515,10 +522,11 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
 }
 
 func (s *Store) MountVolume(i needle.VolumeId) error {
-	for _, location := range s.Locations {
-		if found := location.LoadVolume(i, s.NeedleMapKind); found == true {
+	for diskId, location := range s.Locations {
+		if found := location.LoadVolume(uint32(diskId), i, s.NeedleMapKind); found == true {
			glog.V(0).Infof("mount volume %d", i)
 			v := s.findVolume(i)
+			v.diskId = uint32(diskId) // Set disk ID when mounting
 			s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
 				Id:         uint32(v.Id),
 				Collection: v.Collection,
@@ -526,6 +534,7 @@ func (s *Store) MountVolume(i needle.VolumeId) error {
 				Version:  uint32(v.Version()),
 				Ttl:      v.Ttl.ToUint32(),
 				DiskType: string(v.location.DiskType),
+				DiskId:   uint32(diskId),
 			}
 			return nil
 		}
@@ -546,6 +555,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error {
 			Version:  uint32(v.Version()),
 			Ttl:      v.Ttl.ToUint32(),
 			DiskType: string(v.location.DiskType),
+			DiskId:   v.diskId,
 		}
 
 		for _, location := range s.Locations {
@@ -574,6 +584,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error {
 			Version:  uint32(v.Version()),
 			Ttl:      v.Ttl.ToUint32(),
 			DiskType: string(v.location.DiskType),
+			DiskId:   v.diskId,
 		}
 
 		for _, location := range s.Locations {
 			err := location.DeleteVolume(i, onlyEmpty)