Admin: misc improvements on admin server and workers. EC now works. (#7055)
* initial design
* added simulation as tests
* reorganized the codebase to move the simulation framework and tests into their own dedicated package
* integration test, EC worker task
* remove "enhanced" reference
* start master, volume servers, filer
  Current status:
  ✅ Master: healthy and running (port 9333)
  ✅ Filer: healthy and running (port 8888)
  ✅ Volume servers: all 6 servers running (ports 8080-8085)
  🔄 Admin/workers: will start when dependencies are ready
* generate write load
* tasks are assigned
* admin starts with gRPC port; worker has its own working directory
* Update .gitignore
* working worker and admin; task detection is not working yet
* compiles, detection uses volumeSizeLimitMB from master
* compiles
* worker retries connecting to admin
* build and restart
* rendering pending tasks
* skip task ID column
* sticky worker id
* test canScheduleTaskNow
* worker reconnects to admin
* clean up logs
* worker registers itself first
* worker can run EC work and report status, but: 1. one volume should not be repeatedly worked on; 2. EC shards need to be distributed and the source data should be deleted
* move EC task logic
* listing EC shards
* local copy, EC; still needs distribution
* EC is mostly working now
* distribution of EC shards needs improvement
* need configuration to enable EC
* show EC volumes
* interval field UI component
* rename
* integration test with vacuuming
* garbage percentage threshold (a detection sketch follows this list)
* fix warning
* display EC shard sizes
* fix EC volumes list
* Update ui.go
* show default values
* ensure correct default value
* MaintenanceConfig uses ConfigField
* use schema-defined defaults
* config
* reduce duplication
* refactor to use BaseUIProvider
* each task registers its schema
* checkECEncodingCandidate uses ecDetector
* use vacuumDetector
* use volumeSizeLimitMB
* remove remove
* remove unused
* refactor
* use new framework
* remove v2 reference
* refactor
* left menu can scroll now
* fix: the maintenance manager was not being initialized when no data directory was configured for persistent storage
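Several bullets above (garbage percentage threshold, use vacuumDetector, use volumeSizeLimitMB) describe threshold-based detection of vacuum candidates. A minimal sketch of that idea, assuming hypothetical names (`volumeStat`, `shouldVacuum`) rather than the real detector API:

```go
package main

import "fmt"

// volumeStat is an illustrative stand-in for the per-volume metrics a
// detector could read from volume.list; it is not the actual admin API.
type volumeStat struct {
	id           uint32
	sizeBytes    uint64
	garbageBytes uint64
}

// shouldVacuum reports whether a volume is a vacuum candidate: it is large
// enough to be worth compacting and its garbage ratio meets the configured
// threshold (both values are illustrative knobs, not real config fields).
func shouldVacuum(v volumeStat, garbageThreshold float64, minSizeMB uint64) bool {
	if v.sizeBytes == 0 || v.sizeBytes < minSizeMB*1024*1024 {
		return false
	}
	garbageRatio := float64(v.garbageBytes) / float64(v.sizeBytes)
	return garbageRatio >= garbageThreshold
}

func main() {
	v := volumeStat{id: 42, sizeBytes: 28 << 30, garbageBytes: 12 << 30}
	// ~43% garbage on a 28 GiB volume with a 30% threshold -> candidate
	fmt.Println(shouldVacuum(v, 0.30, 1024))
}
```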
* saving config
* Update task_config_schema_templ.go
* enable/disable tasks
* protobuf-encoded task configurations
* fix system settings
* use UI component
* remove logs
* interface{} reduction
* reduce interface{}
* reduce interface{}
* avoid from/to map
* reduce interface{}
* refactor
* keep it DRY
* added logging
* debug messages
* debug level
* debug
* show the log caller line
* use configured task policy
* log level
* handle admin heartbeat response
* Update worker.go
* fix EC rack and DC count
* report task status to admin server
* fix task logging, simplify interface checking, use erasure_coding constants
* factor in empty volume servers during task planning
* volume.list adds disk id
* track disk id also
* fix locking for scheduled and manual scanning
* add active topology
* simplify task detector
* EC task completed, but shards are not showing up
* implement EC in ec_typed.go
* adjust log level
* dedup
* implement EC copying of shards and only ecx files
* use disk id when distributing EC shards (a request sketch follows this list):
  🎯 Planning: ActiveTopology creates DestinationPlan with a specific TargetDisk
  📦 Task creation: maintenance_integration.go creates ECDestination with DiskId
  🚀 Task execution: the EC task passes DiskId in VolumeEcShardsCopyRequest
  💾 Volume server: receives disk_id and stores shards on the specific disk (vs.store.Locations[req.DiskId])
  📂 File system: EC shards and metadata land in the exact disk directory planned
* delete original volume from all locations
* clean up existing shard locations
* local encoding and distributing
* Update docker/admin_integration/EC-TESTING-README.md

  Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
* check volume id range
* simplify
* fix tests
* fix types
* clean up logs and tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
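The disk-id flow in the bullet above runs from planning, through the copy RPC, to the volume server's disk selection. A minimal sketch of that flow with stand-in struct names (`destinationPlan`, `shardCopyRequest`, and `buildCopyRequests` are illustrative; only DestinationPlan, TargetDisk, DiskId, and VolumeEcShardsCopyRequest come from the commit message):

```go
package main

import "fmt"

// destinationPlan models the planner output described above: which server and
// which disk on that server should receive a given EC shard. The real types
// differ; this only captures the shape of the idea.
type destinationPlan struct {
	targetServer string
	targetDisk   uint32
}

// shardCopyRequest stands in for the copy RPC: the planned disk id travels
// with the request so the receiving volume server can place the shard on that
// exact disk rather than choosing one itself.
type shardCopyRequest struct {
	volumeID uint32
	shardID  uint32
	source   string
	target   string
	diskID   uint32
}

// buildCopyRequests turns per-shard plans into one copy request per shard,
// preserving the planner's disk choice end to end.
func buildCopyRequests(volumeID uint32, source string, plans map[uint32]destinationPlan) []shardCopyRequest {
	reqs := make([]shardCopyRequest, 0, len(plans))
	for shardID, plan := range plans {
		reqs = append(reqs, shardCopyRequest{
			volumeID: volumeID,
			shardID:  shardID,
			source:   source,
			target:   plan.targetServer,
			diskID:   plan.targetDisk,
		})
	}
	return reqs
}

func main() {
	plans := map[uint32]destinationPlan{
		0: {targetServer: "vs1:8080", targetDisk: 1},
		1: {targetServer: "vs2:8080", targetDisk: 0},
	}
	for _, r := range buildCopyRequests(37, "vs0:8080", plans) {
		fmt.Printf("volume %d shard %d: %s -> %s (disk %d)\n", r.volumeID, r.shardID, r.source, r.target, r.diskID)
	}
}
```

Carrying the disk id inside the request is what lets the planner's placement survive across the RPC boundary instead of being re-decided by the receiving volume server.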
@@ -1,6 +1,7 @@
package balance

import (
	"context"
	"fmt"
	"time"

@@ -15,6 +16,9 @@ type Task struct {
	server     string
	volumeID   uint32
	collection string

	// Task parameters for accessing planned destinations
	taskParams types.TaskParams
}

// NewTask creates a new balance task instance
@@ -30,7 +34,31 @@ func NewTask(server string, volumeID uint32, collection string) *Task {

// Execute executes the balance task
func (t *Task) Execute(params types.TaskParams) error {
	glog.Infof("Starting balance task for volume %d on server %s (collection: %s)", t.volumeID, t.server, t.collection)
	// Use BaseTask.ExecuteTask to handle logging initialization
	return t.ExecuteTask(context.Background(), params, t.executeImpl)
}

// executeImpl is the actual balance implementation
func (t *Task) executeImpl(ctx context.Context, params types.TaskParams) error {
	// Store task parameters for accessing planned destinations
	t.taskParams = params

	// Get planned destination
	destNode := t.getPlannedDestination()
	if destNode != "" {
		t.LogWithFields("INFO", "Starting balance task with planned destination", map[string]interface{}{
			"volume_id":   t.volumeID,
			"source":      t.server,
			"destination": destNode,
			"collection":  t.collection,
		})
	} else {
		t.LogWithFields("INFO", "Starting balance task without specific destination", map[string]interface{}{
			"volume_id":  t.volumeID,
			"server":     t.server,
			"collection": t.collection,
		})
	}

	// Simulate balance operation with progress updates
	steps := []struct {
@@ -46,18 +74,36 @@ func (t *Task) Execute(params types.TaskParams) error {
	}

	for _, step := range steps {
		select {
		case <-ctx.Done():
			t.LogWarning("Balance task cancelled during step: %s", step.name)
			return ctx.Err()
		default:
		}

		if t.IsCancelled() {
			t.LogWarning("Balance task cancelled by request during step: %s", step.name)
			return fmt.Errorf("balance task cancelled")
		}

		glog.V(1).Infof("Balance task step: %s", step.name)
		t.LogWithFields("INFO", "Executing balance step", map[string]interface{}{
			"step":      step.name,
			"progress":  step.progress,
			"duration":  step.duration.String(),
			"volume_id": t.volumeID,
		})
		t.SetProgress(step.progress)

		// Simulate work
		time.Sleep(step.duration)
	}

	glog.Infof("Balance task completed for volume %d on server %s", t.volumeID, t.server)
	t.LogWithFields("INFO", "Balance task completed successfully", map[string]interface{}{
		"volume_id":      t.volumeID,
		"server":         t.server,
		"collection":     t.collection,
		"final_progress": 100.0,
	})
	return nil
}

@@ -72,6 +118,19 @@ func (t *Task) Validate(params types.TaskParams) error {
	return nil
}

// getPlannedDestination extracts the planned destination node from task parameters
func (t *Task) getPlannedDestination() string {
	if t.taskParams.TypedParams != nil {
		if balanceParams := t.taskParams.TypedParams.GetBalanceParams(); balanceParams != nil {
			if balanceParams.DestNode != "" {
				glog.V(2).Infof("Found planned destination for volume %d: %s", t.volumeID, balanceParams.DestNode)
				return balanceParams.DestNode
			}
		}
	}
	return ""
}

// EstimateTime estimates the time needed for the task
func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
	// Base time for balance operation