fix(admin): reduce memory usage and verbose logging for large clusters (#8927)
* fix(admin): reduce memory usage and verbose logging for large clusters (#8919) The admin server used excessive memory and produced thousands of log lines on clusters with many volumes (e.g., 33k volumes). Three root causes: 1. Scanner duplicated all volume metrics: getVolumeHealthMetrics() created VolumeHealthMetrics objects, then convertToTaskMetrics() copied them all into identical types.VolumeHealthMetrics. Now uses the task-system type directly, eliminating the duplicate allocation and removing convertToTaskMetrics. 2. All previous task states loaded at startup: LoadTasksFromPersistence read and deserialized every .pb file from disk, logging each one. With thousands of balance tasks persisted, this caused massive startup I/O, memory usage, and log noise (including unguarded DEBUG glog.Infof per task). Now starts with an empty queue — the scanner re-detects current needs from live cluster state. Terminal tasks are purged from memory and disk when new scan results arrive. 3. Verbose per-volume/per-node logging: V(2) and V(3) logs produced thousands of lines per scan. Per-volume logs bumped to V(4), per-node/rack/disk logs bumped to V(3). Topology summary now logs counts instead of full node ID arrays. Also removes lastTopologyInfo field from MaintenanceScanner — the raw protobuf topology is returned as a local value and not retained between 30-minute scans. * fix(admin): delete stale task files at startup, add DeleteAllTaskStates Old task .pb files from previous runs were left on disk. The periodic CleanupCompletedTasks still loads all files to find completed ones — the same expensive 4GB path from the pprof profile. Now at startup, DeleteAllTaskStates removes all .pb files by scanning the directory without reading or deserializing them. The scanner will re-detect any tasks still needed from live cluster state. * fix(admin): don't persist terminal tasks to disk CompleteTask was saving failed/completed tasks to disk where they'd accumulate. 
The periodic cleanup only triggered for completed tasks, not failed ones. Now terminal tasks are deleted from disk immediately and only kept in memory for the current session's UI. * fix(admin): cap in-memory tasks to 100 per job type Without a limit, the task map grows unbounded — balance could create thousands of pending tasks for a cluster with many imbalanced volumes. Now AddTask rejects new tasks when a job type already has 100 in the queue. The scanner will re-detect skipped volumes on the next scan. * fix(admin): address PR review - memory-only purge, active-only capacity - purgeTerminalTasks now only cleans in-memory map (terminal tasks are already deleted from disk by CompleteTask) - Per-type capacity limit counts only active tasks (pending/assigned/ in_progress), not terminal ones - When at capacity, purge terminal tasks first before rejecting * fix(admin): fix orphaned comment, add TaskStatusCancelled to terminal switch - Move hasQueuedOrActiveTaskForVolume comment to its function definition - Add TaskStatusCancelled to the terminal state switch in CompleteTask so cancelled task files are deleted from disk
This commit is contained in:
@@ -33,80 +33,16 @@ func (mq *MaintenanceQueue) SetPersistence(persistence TaskPersistence) {
|
||||
glog.V(1).Infof("Maintenance queue configured with task persistence")
|
||||
}
|
||||
|
||||
// LoadTasksFromPersistence loads tasks from persistent storage on startup
|
||||
// LoadTasksFromPersistence is called on startup. Previous task states are NOT loaded
|
||||
// into memory — the maintenance scanner will re-detect current needs from the live
|
||||
// cluster state. Stale task files from previous runs are deleted from disk.
|
||||
func (mq *MaintenanceQueue) LoadTasksFromPersistence() error {
|
||||
if mq.persistence == nil {
|
||||
glog.V(1).Infof("No task persistence configured, skipping task loading")
|
||||
return nil
|
||||
}
|
||||
|
||||
mq.mutex.Lock()
|
||||
defer mq.mutex.Unlock()
|
||||
|
||||
glog.Infof("Loading tasks from persistence...")
|
||||
|
||||
tasks, err := mq.persistence.LoadAllTaskStates()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load task states: %w", err)
|
||||
}
|
||||
|
||||
glog.Infof("DEBUG LoadTasksFromPersistence: Found %d tasks in persistence", len(tasks))
|
||||
|
||||
// Reset task maps
|
||||
mq.tasks = make(map[string]*MaintenanceTask)
|
||||
mq.pendingTasks = make([]*MaintenanceTask, 0)
|
||||
|
||||
// Load tasks by status
|
||||
for _, task := range tasks {
|
||||
glog.Infof("DEBUG LoadTasksFromPersistence: Loading task %s (type: %s, status: %s, scheduled: %v)", task.ID, task.Type, task.Status, task.ScheduledAt)
|
||||
mq.tasks[task.ID] = task
|
||||
|
||||
switch task.Status {
|
||||
case TaskStatusPending:
|
||||
glog.Infof("DEBUG LoadTasksFromPersistence: Adding task %s to pending queue", task.ID)
|
||||
mq.pendingTasks = append(mq.pendingTasks, task)
|
||||
case TaskStatusAssigned, TaskStatusInProgress:
|
||||
// For assigned/in-progress tasks, we need to check if the worker is still available
|
||||
// If not, we should fail them and make them eligible for retry
|
||||
if task.WorkerID != "" {
|
||||
if _, exists := mq.workers[task.WorkerID]; !exists {
|
||||
glog.Warningf("Task %s was assigned to unavailable worker %s, marking as failed", task.ID, task.WorkerID)
|
||||
task.Status = TaskStatusFailed
|
||||
task.Error = "Worker unavailable after restart"
|
||||
completedTime := time.Now()
|
||||
task.CompletedAt = &completedTime
|
||||
|
||||
// Check if it should be retried
|
||||
if task.RetryCount < task.MaxRetries {
|
||||
task.RetryCount++
|
||||
task.Status = TaskStatusPending
|
||||
task.WorkerID = ""
|
||||
task.StartedAt = nil
|
||||
task.CompletedAt = nil
|
||||
task.Error = ""
|
||||
task.ScheduledAt = time.Now().Add(1 * time.Minute) // Retry after restart delay
|
||||
glog.Infof("DEBUG LoadTasksFromPersistence: Retrying task %s, adding to pending queue", task.ID)
|
||||
mq.pendingTasks = append(mq.pendingTasks, task)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sync task with ActiveTopology for capacity tracking
|
||||
if mq.integration != nil {
|
||||
mq.integration.SyncTask(task)
|
||||
if mq.persistence != nil {
|
||||
if err := mq.persistence.DeleteAllTaskStates(); err != nil {
|
||||
glog.Warningf("Failed to clean up old task files: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort pending tasks by priority and schedule time
|
||||
sort.Slice(mq.pendingTasks, func(i, j int) bool {
|
||||
if mq.pendingTasks[i].Priority != mq.pendingTasks[j].Priority {
|
||||
return mq.pendingTasks[i].Priority > mq.pendingTasks[j].Priority
|
||||
}
|
||||
return mq.pendingTasks[i].ScheduledAt.Before(mq.pendingTasks[j].ScheduledAt)
|
||||
})
|
||||
|
||||
glog.Infof("Loaded %d tasks from persistence (%d pending)", len(tasks), len(mq.pendingTasks))
|
||||
glog.Infof("Task queue initialized (previous tasks will be re-detected by scanner)")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -119,6 +55,14 @@ func (mq *MaintenanceQueue) saveTaskState(task *MaintenanceTask) {
|
||||
}
|
||||
}
|
||||
|
||||
func (mq *MaintenanceQueue) deleteTaskState(taskID string) {
|
||||
if mq.persistence != nil {
|
||||
if err := mq.persistence.DeleteTaskState(taskID); err != nil {
|
||||
glog.V(2).Infof("Failed to delete task state for %s: %v", taskID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupCompletedTasks removes old completed tasks beyond the retention limit
|
||||
func (mq *MaintenanceQueue) cleanupCompletedTasks() {
|
||||
if mq.persistence != nil {
|
||||
@@ -128,10 +72,24 @@ func (mq *MaintenanceQueue) cleanupCompletedTasks() {
|
||||
}
|
||||
}
|
||||
|
||||
// MaxTasksPerType caps the number of active (pending/assigned/in-progress)
// tasks kept per job type. Without a limit the task map grows unbounded on
// large clusters (e.g. balance creating thousands of pending tasks); skipped
// volumes are re-detected by the scanner on the next scan.
const MaxTasksPerType = 100
|
||||
|
||||
// AddTask adds a new maintenance task to the queue with deduplication
|
||||
func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
|
||||
mq.mutex.Lock()
|
||||
|
||||
// Enforce per-type capacity limit (only counting active tasks)
|
||||
if mq.countActiveTasksByType(task.Type) >= MaxTasksPerType {
|
||||
// Purge terminal tasks first, then recheck
|
||||
mq.purgeTerminalTasksLocked()
|
||||
if mq.countActiveTasksByType(task.Type) >= MaxTasksPerType {
|
||||
mq.mutex.Unlock()
|
||||
glog.V(1).Infof("Task skipped (type %s at capacity %d): volume %d on %s",
|
||||
task.Type, MaxTasksPerType, task.VolumeID, task.Server)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Enforce one queued/active task per volume (across all task types).
|
||||
if mq.hasQueuedOrActiveTaskForVolume(task.VolumeID) {
|
||||
mq.mutex.Unlock()
|
||||
@@ -197,6 +155,30 @@ func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
|
||||
taskSnapshot.ID, taskSnapshot.Type, taskSnapshot.VolumeID, taskSnapshot.Server, taskSnapshot.Priority, scheduleInfo, taskSnapshot.Reason)
|
||||
}
|
||||
|
||||
// countActiveTasksByType returns the number of active (non-terminal) tasks of a given type. Caller must hold mq.mutex.
|
||||
func (mq *MaintenanceQueue) countActiveTasksByType(taskType MaintenanceTaskType) int {
|
||||
count := 0
|
||||
for _, t := range mq.tasks {
|
||||
if t.Type == taskType {
|
||||
switch t.Status {
|
||||
case TaskStatusPending, TaskStatusAssigned, TaskStatusInProgress:
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// purgeTerminalTasksLocked removes terminal tasks from the in-memory map. Caller must hold mq.mutex.
|
||||
func (mq *MaintenanceQueue) purgeTerminalTasksLocked() {
|
||||
for id, task := range mq.tasks {
|
||||
switch task.Status {
|
||||
case TaskStatusCompleted, TaskStatusFailed, TaskStatusCancelled:
|
||||
delete(mq.tasks, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// hasQueuedOrActiveTaskForVolume checks if any pending/assigned/in-progress task already exists for this volume.
|
||||
// Caller must hold mq.mutex.
|
||||
func (mq *MaintenanceQueue) hasQueuedOrActiveTaskForVolume(volumeID uint32) bool {
|
||||
@@ -273,6 +255,9 @@ func (mq *MaintenanceQueue) CancelPendingTasksByType(taskType MaintenanceTaskTyp
|
||||
|
||||
// AddTasksFromResults converts detection results to tasks and adds them to the queue
|
||||
func (mq *MaintenanceQueue) AddTasksFromResults(results []*TaskDetectionResult) {
|
||||
// Purge terminal tasks from memory before adding new ones
|
||||
mq.purgeTerminalTasks()
|
||||
|
||||
for _, result := range results {
|
||||
// Validate that task has proper typed parameters
|
||||
if result.TypedParams == nil {
|
||||
@@ -297,6 +282,21 @@ func (mq *MaintenanceQueue) AddTasksFromResults(results []*TaskDetectionResult)
|
||||
}
|
||||
}
|
||||
|
||||
// purgeTerminalTasks removes completed/failed/cancelled tasks from memory.
|
||||
// Terminal tasks are already deleted from disk by CompleteTask, so this
|
||||
// only needs to clean up the in-memory map.
|
||||
func (mq *MaintenanceQueue) purgeTerminalTasks() {
|
||||
mq.mutex.Lock()
|
||||
before := len(mq.tasks)
|
||||
mq.purgeTerminalTasksLocked()
|
||||
purged := before - len(mq.tasks)
|
||||
mq.mutex.Unlock()
|
||||
|
||||
if purged > 0 {
|
||||
glog.V(1).Infof("Purged %d terminal tasks from memory", purged)
|
||||
}
|
||||
}
|
||||
|
||||
// GetNextTask returns the next available task for a worker
|
||||
func (mq *MaintenanceQueue) GetNextTask(workerID string, capabilities []MaintenanceTaskType) *MaintenanceTask {
|
||||
// Use read lock for initial checks and search
|
||||
@@ -570,7 +570,6 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
|
||||
}
|
||||
}
|
||||
taskStatus := task.Status
|
||||
taskCount := len(mq.tasks)
|
||||
// Snapshot task state while lock is still held to avoid data race
|
||||
var taskToSaveSnapshot *MaintenanceTask
|
||||
if taskToSave != nil {
|
||||
@@ -578,9 +577,18 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
|
||||
}
|
||||
mq.mutex.Unlock()
|
||||
|
||||
// Save task state to persistence outside the lock
|
||||
// Only persist non-terminal tasks (retries). Completed/failed tasks stay
|
||||
// in memory for the UI but are not written to disk — they would just
|
||||
// accumulate and slow down future startups.
|
||||
if taskToSaveSnapshot != nil {
|
||||
mq.saveTaskState(taskToSaveSnapshot)
|
||||
switch taskStatus {
|
||||
case TaskStatusPending:
|
||||
// Retry — save so the task survives a restart
|
||||
mq.saveTaskState(taskToSaveSnapshot)
|
||||
case TaskStatusCompleted, TaskStatusFailed, TaskStatusCancelled:
|
||||
// Terminal — delete the file if one exists from a previous state
|
||||
mq.deleteTaskState(taskToSaveSnapshot.ID)
|
||||
}
|
||||
}
|
||||
|
||||
if logFn != nil {
|
||||
@@ -591,13 +599,6 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
|
||||
if taskStatus != TaskStatusPending {
|
||||
mq.removePendingOperation(taskID)
|
||||
}
|
||||
|
||||
// Periodically cleanup old completed tasks (when total task count is a multiple of 10)
|
||||
if taskStatus == TaskStatusCompleted {
|
||||
if taskCount%10 == 0 {
|
||||
go mq.cleanupCompletedTasks()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isNonRetriableError returns true for errors that will never succeed on retry,
|
||||
|
||||
Reference in New Issue
Block a user