Admin: misc improvements on admin server and workers. EC now works. (#7055)
* initial design
* added simulation as tests
* reorganized the codebase to move the simulation framework and tests into their own dedicated package
* integration test for the EC worker task
* remove "enhanced" reference
* start master, volume servers, filer. Current status:
  * ✅ Master: healthy and running (port 9333)
  * ✅ Filer: healthy and running (port 8888)
  * ✅ Volume servers: all 6 servers running (ports 8080-8085)
  * 🔄 Admin/workers: will start when dependencies are ready
* generate write load
* tasks are assigned
* admin starts with a gRPC port; each worker has its own working directory
* Update .gitignore
* working worker and admin; task detection is not working yet
* compiles, detection uses volumeSizeLimitMB from master
* compiles
* worker retries connecting to admin
* build and restart
* rendering pending tasks
* skip task ID column
* sticky worker id
* test canScheduleTaskNow
* worker reconnects to admin
* clean up logs
* worker registers itself first
* worker can run EC work and report status, but: 1. one volume should not be repeatedly worked on; 2. EC shards need to be distributed and the source data should be deleted
* move EC task logic
* listing EC shards
* local copy and EC; still needs to be distributed
* EC is mostly working now
* distribution of EC shards needs improvement
* need configuration to enable EC
* show EC volumes
* interval field UI component
* rename
* integration test with vacuuming
* garbage percentage threshold
* fix warning
* display EC shard sizes
* fix EC volumes list
* Update ui.go
* show default values
* ensure correct default value
* MaintenanceConfig uses ConfigField
* use schema-defined defaults
* config
* reduce duplication
* refactor to use BaseUIProvider
* each task registers its own schema
* checkECEncodingCandidate uses ecDetector
* use vacuumDetector
* use volumeSizeLimitMB
* remove remove
* remove unused
* refactor
* use new framework
* remove v2 reference
* refactor
* left menu can scroll now
* fix: the maintenance manager was not being initialized when no data directory was configured for persistent storage
* saving config
* Update task_config_schema_templ.go
* enable/disable tasks
* protobuf-encoded task configurations
* fix system settings
* use UI component
* remove logs
* interface{} reduction
* reduce interface{}
* reduce interface{}
* avoid from/to map
* reduce interface{}
* refactor
* keep it DRY
* added logging
* debug messages
* debug level
* debug
* show the log caller line
* use configured task policy
* log level
* handle admin heartbeat response
* Update worker.go
* fix EC rack and dc count
* report task status to admin server
* fix task logging, simplify interface checking, use erasure_coding constants
* factor in empty volume servers during task planning
* volume.list adds disk id
* track disk id also
* fix locking for scheduled and manual scanning
* add active topology
* simplify task detector
* EC task completed, but shards are not showing up
* implement EC in ec_typed.go
* adjust log level
* dedup
* implementing EC copying of shards and only ecx files
* use disk id when distributing EC shards (see the sketch after this list):
  * 🎯 Planning: ActiveTopology creates a DestinationPlan with a specific TargetDisk
  * 📦 Task creation: maintenance_integration.go creates an ECDestination with DiskId
  * 🚀 Task execution: the EC task passes DiskId in VolumeEcShardsCopyRequest
  * 💾 Volume server: receives disk_id and stores shards on the specific disk (vs.store.Locations[req.DiskId])
  * 📂 File system: EC shards and metadata land in the exact disk directory planned
* Delete original volume from all locations
* clean up existing shard locations
* local encoding and distributing
* Update docker/admin_integration/EC-TESTING-README.md
  Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
* check volume id range
* simplify
* fix tests
* fix types
* clean up logs and tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
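A minimal, self-contained sketch of the last step of that disk-id flow, under simplified assumptions: the type names and helper below are illustrative stand-ins (the real request lives in volume_server_pb and the real store in weed/storage); only the idea of the volume server honoring req.DiskId via store.Locations[req.DiskId] is taken from the items above.

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the SeaweedFS types; not the real definitions.
type DiskLocation struct {
	Directory string
}

type Store struct {
	Locations []*DiskLocation // one entry per configured disk/directory
}

// VolumeEcShardsCopyRequest carries the disk index chosen by the planner
// (ActiveTopology -> ECDestination -> task request), per the commit message.
type VolumeEcShardsCopyRequest struct {
	VolumeId uint32
	DiskId   uint32
}

// pickEcShardTargetLocation returns the disk location the copied EC shards
// should be written to, so the files land in the exact directory planned.
func pickEcShardTargetLocation(store *Store, req *VolumeEcShardsCopyRequest) (*DiskLocation, error) {
	if int(req.DiskId) >= len(store.Locations) {
		return nil, errors.New("disk id out of range")
	}
	return store.Locations[req.DiskId], nil
}

func main() {
	store := &Store{Locations: []*DiskLocation{{Directory: "/data1"}, {Directory: "/data2"}}}
	loc, err := pickEcShardTargetLocation(store, &VolumeEcShardsCopyRequest{VolumeId: 7, DiskId: 1})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("EC shards for volume 7 go under", loc.Directory)
}
```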
@@ -25,10 +25,10 @@ import (
 func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
 	var ecShardMessages []*master_pb.VolumeEcShardInformationMessage
 	collectionEcShardSize := make(map[string]int64)
-	for _, location := range s.Locations {
+	for diskId, location := range s.Locations {
 		location.ecVolumesLock.RLock()
 		for _, ecShards := range location.ecVolumes {
-			ecShardMessages = append(ecShardMessages, ecShards.ToVolumeEcShardInformationMessage()...)
+			ecShardMessages = append(ecShardMessages, ecShards.ToVolumeEcShardInformationMessage(uint32(diskId))...)
 
 			for _, ecShard := range ecShards.Shards {
 				collectionEcShardSize[ecShards.Collection] += ecShard.Size()
@@ -49,9 +49,9 @@ func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
 }
 
 func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error {
-	for _, location := range s.Locations {
+	for diskId, location := range s.Locations {
 		if ecVolume, err := location.LoadEcShard(collection, vid, shardId); err == nil {
-			glog.V(0).Infof("MountEcShards %d.%d", vid, shardId)
+			glog.V(0).Infof("MountEcShards %d.%d on disk ID %d", vid, shardId, diskId)
 
 			var shardBits erasure_coding.ShardBits
 
@@ -61,6 +61,7 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er
 				EcIndexBits: uint32(shardBits.AddShardId(shardId)),
 				DiskType:    string(location.DiskType),
 				ExpireAtSec: ecVolume.ExpireAtSec,
+				DiskId:      uint32(diskId),
 			}
 			return nil
 		} else if err == os.ErrNotExist {
@@ -75,7 +76,7 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er
 
 func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.ShardId) error {
 
-	ecShard, found := s.findEcShard(vid, shardId)
+	diskId, ecShard, found := s.findEcShard(vid, shardId)
 	if !found {
 		return nil
 	}
@@ -86,26 +87,27 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar
 		Collection:  ecShard.Collection,
 		EcIndexBits: uint32(shardBits.AddShardId(shardId)),
 		DiskType:    string(ecShard.DiskType),
+		DiskId:      diskId,
 	}
 
-	for _, location := range s.Locations {
-		if deleted := location.UnloadEcShard(vid, shardId); deleted {
-			glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
-			s.DeletedEcShardsChan <- message
-			return nil
-		}
+	location := s.Locations[diskId]
+
+	if deleted := location.UnloadEcShard(vid, shardId); deleted {
+		glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId)
+		s.DeletedEcShardsChan <- message
+		return nil
 	}
 
 	return fmt.Errorf("UnmountEcShards %d.%d not found on disk", vid, shardId)
 }
 
-func (s *Store) findEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) (*erasure_coding.EcVolumeShard, bool) {
-	for _, location := range s.Locations {
+func (s *Store) findEcShard(vid needle.VolumeId, shardId erasure_coding.ShardId) (diskId uint32, shard *erasure_coding.EcVolumeShard, found bool) {
+	for diskId, location := range s.Locations {
 		if v, found := location.FindEcShard(vid, shardId); found {
-			return v, found
+			return uint32(diskId), v, found
 		}
 	}
-	return nil, false
+	return 0, nil, false
 }
 
 func (s *Store) FindEcVolume(vid needle.VolumeId) (*erasure_coding.EcVolume, bool) {
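The net effect of the diff above is that a disk index now travels with every EC shard operation. Below is a small, self-contained sketch of the findEcShard pattern, using illustrative stand-in types rather than the real weed/storage ones: returning the owning disk's index lets the caller (UnmountEcShards in the diff) address s.Locations[diskId] directly instead of scanning every location a second time.

```go
package main

import "fmt"

// Stand-in types for illustration only.
type ecShard struct{ volumeId, shardId int }

type diskLocation struct {
	dir    string
	shards map[[2]int]*ecShard // keyed by (volumeId, shardId)
}

type store struct{ locations []*diskLocation }

// findEcShard mirrors the updated signature in the diff: it also reports
// which disk the shard was found on.
func (s *store) findEcShard(vid, sid int) (diskId uint32, shard *ecShard, found bool) {
	for i, loc := range s.locations {
		if sh, ok := loc.shards[[2]int{vid, sid}]; ok {
			return uint32(i), sh, true
		}
	}
	return 0, nil, false
}

func main() {
	s := &store{locations: []*diskLocation{
		{dir: "/data1", shards: map[[2]int]*ecShard{}},
		{dir: "/data2", shards: map[[2]int]*ecShard{{7, 3}: {volumeId: 7, shardId: 3}}},
	}}
	if diskId, _, found := s.findEcShard(7, 3); found {
		// The unmount path can now go straight to the owning disk.
		fmt.Println("shard 7.3 lives on", s.locations[diskId].dir)
	}
}
```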