Files
seaweedFS/weed/admin/plugin/plugin_scheduler.go
Chris Lu 8ec9ff4a12 Refactor plugin system and migrate worker runtime (#8369)
* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fallback to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric and color-only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
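A hedged sketch of what a debounced flusher can look like; the actual flusher's fields and delay are internal to this PR:

    import (
        "sync"
        "time"
    )

    type flusher struct {
        mu    sync.Mutex
        timer *time.Timer
        write func() // performs the actual persistence
    }

    func (f *flusher) markDirty() {
        f.mu.Lock()
        defer f.mu.Unlock()
        if f.timer == nil {
            // coalesce a burst of updates into a single delayed write
            f.timer = time.AfterFunc(500*time.Millisecond, func() {
                f.mu.Lock()
                f.timer = nil
                f.mu.Unlock()
                f.write()
            })
        }
    }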

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support
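Why the pointer refactor helps: encoding/json treats a zero time.Time struct as non-empty, so omitempty never drops it, while a nil *time.Time is omitted. A sketch with illustrative field names:

    import "time"

    type jobTimes struct {
        CreatedAt   *time.Time `json:"created_at,omitempty"` // nil is omitted from JSON
        CompletedAt *time.Time `json:"completed_at,omitempty"`
    }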

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with a seeded math/rand PRNG for bulk payloads (see the sketch below).
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.
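
A minimal sketch of that payload generation, assuming an illustrative seed and size (the runner's actual values may differ). math/rand is appropriate here because the payloads only need to be reproducible test data, not cryptographic randomness:

    import "math/rand"

    func bulkPayload(seed int64, size int) []byte {
        rng := rand.New(rand.NewSource(seed)) // deterministic across runs
        buf := make([]byte, size)
        rng.Read(buf) // (*rand.Rand).Read fills buf and never returns an error
        return buf
    }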

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented a safeSendCh helper using recover() to prevent panics on closed channels (sketched below).
- Ensured Shutdown() waits for all background operations to complete.
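
A minimal sketch of the recover pattern, using a hypothetical generic helper (the PR's actual helper is safeSendCh):

    func safeSend[T any](ch chan<- T, value T) (sent bool) {
        defer func() {
            // Sending on a closed channel panics; recover converts
            // that panic into a false return instead of crashing.
            if recover() != nil {
                sent = false
            }
        }()
        ch <- value
        return true
    }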

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt); the copy pattern is sketched below.
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.
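
One plausible shape of that nil-safe copy (the helper name here is illustrative, not the exact code):

    import "time"

    func copyTimePtr(src *time.Time) *time.Time {
        if src == nil {
            return nil // unset stays unset
        }
        t := *src // copy the value so two records never share a pointer
        return &t
    }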

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with an explicit select on r.shutdownCh for shutdown detection (pattern shown below).
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
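
The shape of that check, which appears throughout plugin_scheduler.go below (in reserveScheduledExecutor and the dispatch workers):

    select {
    case <-r.shutdownCh:
        return // unwind on shutdown instead of matching on error text
    default:
        // not shutting down; fall through
    }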

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing; the write-then-rename pattern is sketched below.
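
A sketch of that pattern under illustrative names and permissions (not the exact atomicWriteFile code):

    import (
        "os"
        "path/filepath"
    )

    func atomicWrite(path string, data []byte) error {
        if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
            return err
        }
        tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
        if err != nil {
            return err
        }
        defer os.Remove(tmp.Name()) // no-op once the rename succeeds
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Close(); err != nil {
            return err
        }
        // rename is atomic on POSIX filesystems, so readers never see a partial file
        return os.Rename(tmp.Name(), path)
    }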

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Reworked plugin_monitor_test.go to robustly handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed a race in safeSendDetectionComplete by extracting the channel under the lock before sending (sketched below).
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
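
A minimal sketch of that race fix; the field and variable names are illustrative:

    r.mu.Lock()
    ch := r.detectionCompleteCh // grab the reference while holding the lock
    r.mu.Unlock()
    if ch != nil {
        ch <- result // send without holding the lock
    }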

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
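One way to enforce such a cap with the standard library (a sketch, not the exact parseProtoJSONBody code):

    import (
        "io"
        "net/http"
    )

    const maxPluginBodyBytes = 1 << 20 // 1 MiB

    func readLimitedBody(w http.ResponseWriter, r *http.Request) ([]byte, error) {
        // MaxBytesReader fails the read once the limit is exceeded,
        // bounding memory use per request.
        return io.ReadAll(http.MaxBytesReader(w, r.Body, maxPluginBodyBytes))
    }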

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID

* admin/plugin: fix racy Shutdown channel close with sync.Once
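The sync.Once pattern named here, with illustrative type and field names:

    import "sync"

    type service struct {
        shutdownCh   chan struct{}
        shutdownOnce sync.Once
    }

    func (s *service) Shutdown() {
        // close panics on an already-closed channel; Once makes Shutdown idempotent
        s.shutdownOnce.Do(func() { close(s.shutdownCh) })
    }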

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators
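A plausible shape for that comparator, with a nil OccurredAt sorting first (illustrative, not the exact helper):

    func activityLess(a, b JobActivity) bool {
        switch {
        case a.OccurredAt == nil:
            return b.OccurredAt != nil // nil sorts before any timestamp
        case b.OccurredAt == nil:
            return false
        default:
            return a.OccurredAt.Before(*b.OccurredAt)
        }
    }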

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2026-02-18 13:42:41 -08:00


package plugin

import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/protobuf/types/known/timestamppb"
)
var errExecutorAtCapacity = errors.New("executor is at capacity")
const (
defaultSchedulerTick = 5 * time.Second
defaultScheduledDetectionInterval = 300 * time.Second
defaultScheduledDetectionTimeout = 45 * time.Second
defaultScheduledExecutionTimeout = 90 * time.Second
defaultScheduledMaxResults int32 = 1000
defaultScheduledExecutionConcurrency = 1
defaultScheduledPerWorkerConcurrency = 1
maxScheduledExecutionConcurrency = 128
defaultScheduledRetryBackoff = 5 * time.Second
defaultClusterContextTimeout = 10 * time.Second
defaultWaitingBacklogFloor = 8
defaultWaitingBacklogMultiplier = 4
)
type schedulerPolicy struct {
DetectionInterval time.Duration
DetectionTimeout time.Duration
ExecutionTimeout time.Duration
RetryBackoff time.Duration
MaxResults int32
ExecutionConcurrency int
PerWorkerConcurrency int
RetryLimit int
ExecutorReserveBackoff time.Duration
}
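
// schedulerLoop runs one tick immediately, then re-evaluates every
// detectable job type each schedulerTick until shutdownCh is closed.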
func (r *Plugin) schedulerLoop() {
defer r.wg.Done()
ticker := time.NewTicker(r.schedulerTick)
defer ticker.Stop()
// Try once immediately on startup.
r.runSchedulerTick()
for {
select {
case <-r.shutdownCh:
return
case <-ticker.C:
r.runSchedulerTick()
}
}
}
func (r *Plugin) runSchedulerTick() {
jobTypes := r.registry.DetectableJobTypes()
if len(jobTypes) == 0 {
return
}
active := make(map[string]struct{}, len(jobTypes))
for _, jobType := range jobTypes {
active[jobType] = struct{}{}
policy, enabled, err := r.loadSchedulerPolicy(jobType)
if err != nil {
glog.Warningf("Plugin scheduler failed to load policy for %s: %v", jobType, err)
continue
}
if !enabled {
r.clearSchedulerJobType(jobType)
continue
}
if !r.markDetectionDue(jobType, policy.DetectionInterval) {
continue
}
r.wg.Add(1)
go func(jt string, p schedulerPolicy) {
defer r.wg.Done()
r.runScheduledDetection(jt, p)
}(jobType, policy)
}
r.pruneSchedulerState(active)
r.pruneDetectorLeases(active)
}
func (r *Plugin) loadSchedulerPolicy(jobType string) (schedulerPolicy, bool, error) {
cfg, err := r.store.LoadJobTypeConfig(jobType)
if err != nil {
return schedulerPolicy{}, false, err
}
descriptor, err := r.store.LoadDescriptor(jobType)
if err != nil {
return schedulerPolicy{}, false, err
}
adminRuntime := deriveSchedulerAdminRuntime(cfg, descriptor)
if adminRuntime == nil {
return schedulerPolicy{}, false, nil
}
if !adminRuntime.Enabled {
return schedulerPolicy{}, false, nil
}
policy := schedulerPolicy{
DetectionInterval: durationFromSeconds(adminRuntime.DetectionIntervalSeconds, defaultScheduledDetectionInterval),
DetectionTimeout: durationFromSeconds(adminRuntime.DetectionTimeoutSeconds, defaultScheduledDetectionTimeout),
ExecutionTimeout: defaultScheduledExecutionTimeout,
RetryBackoff: durationFromSeconds(adminRuntime.RetryBackoffSeconds, defaultScheduledRetryBackoff),
MaxResults: adminRuntime.MaxJobsPerDetection,
ExecutionConcurrency: int(adminRuntime.GlobalExecutionConcurrency),
PerWorkerConcurrency: int(adminRuntime.PerWorkerExecutionConcurrency),
RetryLimit: int(adminRuntime.RetryLimit),
ExecutorReserveBackoff: 200 * time.Millisecond,
}
if policy.DetectionInterval < r.schedulerTick {
policy.DetectionInterval = r.schedulerTick
}
if policy.MaxResults <= 0 {
policy.MaxResults = defaultScheduledMaxResults
}
if policy.ExecutionConcurrency <= 0 {
policy.ExecutionConcurrency = defaultScheduledExecutionConcurrency
}
if policy.ExecutionConcurrency > maxScheduledExecutionConcurrency {
policy.ExecutionConcurrency = maxScheduledExecutionConcurrency
}
if policy.PerWorkerConcurrency <= 0 {
policy.PerWorkerConcurrency = defaultScheduledPerWorkerConcurrency
}
if policy.PerWorkerConcurrency > policy.ExecutionConcurrency {
policy.PerWorkerConcurrency = policy.ExecutionConcurrency
}
if policy.RetryLimit < 0 {
policy.RetryLimit = 0
}
// The plugin protocol only exposes a detection timeout in admin settings,
// so derive the execution timeout as twice that value, floored at the default.
execTimeout := time.Duration(adminRuntime.DetectionTimeoutSeconds*2) * time.Second
if execTimeout < defaultScheduledExecutionTimeout {
execTimeout = defaultScheduledExecutionTimeout
}
policy.ExecutionTimeout = execTimeout
return policy, true, nil
}
func (r *Plugin) ListSchedulerStates() ([]SchedulerJobTypeState, error) {
jobTypes, err := r.ListKnownJobTypes()
if err != nil {
return nil, err
}
r.schedulerMu.Lock()
nextDetectionAt := make(map[string]time.Time, len(r.nextDetectionAt))
for jobType, nextRun := range r.nextDetectionAt {
nextDetectionAt[jobType] = nextRun
}
detectionInFlight := make(map[string]bool, len(r.detectionInFlight))
for jobType, inFlight := range r.detectionInFlight {
detectionInFlight[jobType] = inFlight
}
r.schedulerMu.Unlock()
states := make([]SchedulerJobTypeState, 0, len(jobTypes))
for _, jobType := range jobTypes {
state := SchedulerJobTypeState{
JobType: jobType,
DetectionInFlight: detectionInFlight[jobType],
}
if nextRun, ok := nextDetectionAt[jobType]; ok && !nextRun.IsZero() {
nextRunUTC := nextRun.UTC()
state.NextDetectionAt = &nextRunUTC
}
policy, enabled, loadErr := r.loadSchedulerPolicy(jobType)
if loadErr != nil {
state.PolicyError = loadErr.Error()
} else {
state.Enabled = enabled
if enabled {
state.DetectionIntervalSeconds = secondsFromDuration(policy.DetectionInterval)
state.DetectionTimeoutSeconds = secondsFromDuration(policy.DetectionTimeout)
state.ExecutionTimeoutSeconds = secondsFromDuration(policy.ExecutionTimeout)
state.MaxJobsPerDetection = policy.MaxResults
state.GlobalExecutionConcurrency = policy.ExecutionConcurrency
state.PerWorkerExecutionConcurrency = policy.PerWorkerConcurrency
state.RetryLimit = policy.RetryLimit
state.RetryBackoffSeconds = secondsFromDuration(policy.RetryBackoff)
}
}
leasedWorkerID := r.getDetectorLease(jobType)
if leasedWorkerID != "" {
state.DetectorWorkerID = leasedWorkerID
if worker, ok := r.registry.Get(leasedWorkerID); ok {
if capability := worker.Capabilities[jobType]; capability != nil && capability.CanDetect {
state.DetectorAvailable = true
}
}
}
if state.DetectorWorkerID == "" {
detector, detectorErr := r.registry.PickDetector(jobType)
if detectorErr == nil && detector != nil {
state.DetectorAvailable = true
state.DetectorWorkerID = detector.WorkerID
}
}
executors, executorErr := r.registry.ListExecutors(jobType)
if executorErr == nil {
state.ExecutorWorkerCount = len(executors)
}
states = append(states, state)
}
return states, nil
}
func deriveSchedulerAdminRuntime(
cfg *plugin_pb.PersistedJobTypeConfig,
descriptor *plugin_pb.JobTypeDescriptor,
) *plugin_pb.AdminRuntimeConfig {
if cfg != nil && cfg.AdminRuntime != nil {
adminConfig := *cfg.AdminRuntime
return &adminConfig
}
if descriptor == nil || descriptor.AdminRuntimeDefaults == nil {
return nil
}
defaults := descriptor.AdminRuntimeDefaults
return &plugin_pb.AdminRuntimeConfig{
Enabled: defaults.Enabled,
DetectionIntervalSeconds: defaults.DetectionIntervalSeconds,
DetectionTimeoutSeconds: defaults.DetectionTimeoutSeconds,
MaxJobsPerDetection: defaults.MaxJobsPerDetection,
GlobalExecutionConcurrency: defaults.GlobalExecutionConcurrency,
PerWorkerExecutionConcurrency: defaults.PerWorkerExecutionConcurrency,
RetryLimit: defaults.RetryLimit,
RetryBackoffSeconds: defaults.RetryBackoffSeconds,
}
}
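
// markDetectionDue reports whether a detection for jobType should start now.
// It returns false while a detection is already in flight or before the next
// scheduled run; otherwise it books the next run and marks the type in flight.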
func (r *Plugin) markDetectionDue(jobType string, interval time.Duration) bool {
now := time.Now().UTC()
r.schedulerMu.Lock()
defer r.schedulerMu.Unlock()
if r.detectionInFlight[jobType] {
return false
}
nextRun, exists := r.nextDetectionAt[jobType]
if exists && now.Before(nextRun) {
return false
}
r.nextDetectionAt[jobType] = now.Add(interval)
r.detectionInFlight[jobType] = true
return true
}
func (r *Plugin) finishDetection(jobType string) {
r.schedulerMu.Lock()
delete(r.detectionInFlight, jobType)
r.schedulerMu.Unlock()
}
func (r *Plugin) pruneSchedulerState(activeJobTypes map[string]struct{}) {
r.schedulerMu.Lock()
defer r.schedulerMu.Unlock()
for jobType := range r.nextDetectionAt {
if _, ok := activeJobTypes[jobType]; !ok {
delete(r.nextDetectionAt, jobType)
delete(r.detectionInFlight, jobType)
}
}
}
func (r *Plugin) clearSchedulerJobType(jobType string) {
r.schedulerMu.Lock()
delete(r.nextDetectionAt, jobType)
delete(r.detectionInFlight, jobType)
r.schedulerMu.Unlock()
r.clearDetectorLease(jobType, "")
}
func (r *Plugin) pruneDetectorLeases(activeJobTypes map[string]struct{}) {
r.detectorLeaseMu.Lock()
defer r.detectorLeaseMu.Unlock()
for jobType := range r.detectorLeases {
if _, ok := activeJobTypes[jobType]; !ok {
delete(r.detectorLeases, jobType)
}
}
}
func (r *Plugin) runScheduledDetection(jobType string, policy schedulerPolicy) {
defer r.finishDetection(jobType)
start := time.Now().UTC()
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: "scheduled detection started",
Stage: "detecting",
OccurredAt: timeToPtr(start),
})
if skip, waitingCount, waitingThreshold := r.shouldSkipDetectionForWaitingJobs(jobType, policy); skip {
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled detection skipped: waiting backlog %d reached threshold %d", waitingCount, waitingThreshold),
Stage: "skipped_waiting_backlog",
OccurredAt: timeToPtr(time.Now().UTC()),
})
return
}
clusterContext, err := r.loadSchedulerClusterContext()
if err != nil {
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled detection aborted: %v", err),
Stage: "failed",
OccurredAt: timeToPtr(time.Now().UTC()),
})
return
}
ctx, cancel := context.WithTimeout(context.Background(), policy.DetectionTimeout)
proposals, err := r.RunDetection(ctx, jobType, clusterContext, policy.MaxResults)
cancel()
if err != nil {
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled detection failed: %v", err),
Stage: "failed",
OccurredAt: timeToPtr(time.Now().UTC()),
})
return
}
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled detection completed: %d proposal(s)", len(proposals)),
Stage: "detected",
OccurredAt: timeToPtr(time.Now().UTC()),
})
filteredByActive, skippedActive := r.filterProposalsWithActiveJobs(jobType, proposals)
if skippedActive > 0 {
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled detection skipped %d proposal(s) due to active assigned/running jobs", skippedActive),
Stage: "deduped_active_jobs",
OccurredAt: timeToPtr(time.Now().UTC()),
})
}
if len(filteredByActive) == 0 {
return
}
filtered := r.filterScheduledProposals(filteredByActive)
if len(filtered) != len(filteredByActive) {
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled detection deduped %d proposal(s) within this run", len(filteredByActive)-len(filtered)),
Stage: "deduped",
OccurredAt: timeToPtr(time.Now().UTC()),
})
}
if len(filtered) == 0 {
return
}
r.dispatchScheduledProposals(jobType, filtered, clusterContext, policy)
}
func (r *Plugin) loadSchedulerClusterContext() (*plugin_pb.ClusterContext, error) {
if r.clusterContextProvider == nil {
return nil, fmt.Errorf("cluster context provider is not configured")
}
ctx, cancel := context.WithTimeout(context.Background(), defaultClusterContextTimeout)
defer cancel()
clusterContext, err := r.clusterContextProvider(ctx)
if err != nil {
return nil, err
}
if clusterContext == nil {
return nil, fmt.Errorf("cluster context provider returned nil")
}
return clusterContext, nil
}
func (r *Plugin) dispatchScheduledProposals(
jobType string,
proposals []*plugin_pb.JobProposal,
clusterContext *plugin_pb.ClusterContext,
policy schedulerPolicy,
) {
jobQueue := make(chan *plugin_pb.JobSpec, len(proposals))
for index, proposal := range proposals {
job := buildScheduledJobSpec(jobType, proposal, index)
r.trackExecutionQueued(job)
select {
case <-r.shutdownCh:
close(jobQueue)
return
default:
jobQueue <- job
}
}
close(jobQueue)
var wg sync.WaitGroup
var statsMu sync.Mutex
successCount := 0
errorCount := 0
workerCount := policy.ExecutionConcurrency
if workerCount < 1 {
workerCount = 1
}
for i := 0; i < workerCount; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for job := range jobQueue {
select {
case <-r.shutdownCh:
return
default:
}
for {
select {
case <-r.shutdownCh:
return
default:
}
executor, release, reserveErr := r.reserveScheduledExecutor(jobType, policy)
if reserveErr != nil {
select {
case <-r.shutdownCh:
return
default:
}
statsMu.Lock()
errorCount++
statsMu.Unlock()
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled execution reservation failed: %v", reserveErr),
Stage: "failed",
OccurredAt: timeToPtr(time.Now().UTC()),
})
break
}
err := r.executeScheduledJobWithExecutor(executor, job, clusterContext, policy)
release()
if errors.Is(err, errExecutorAtCapacity) {
r.trackExecutionQueued(job)
if !waitForShutdownOrTimer(r.shutdownCh, policy.ExecutorReserveBackoff) {
return
}
continue
}
if err != nil {
statsMu.Lock()
errorCount++
statsMu.Unlock()
r.appendActivity(JobActivity{
JobID: job.JobId,
JobType: job.JobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled execution failed: %v", err),
Stage: "failed",
OccurredAt: timeToPtr(time.Now().UTC()),
})
break
}
statsMu.Lock()
successCount++
statsMu.Unlock()
break
}
}
}()
}
wg.Wait()
r.appendActivity(JobActivity{
JobType: jobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("scheduled execution finished: success=%d error=%d", successCount, errorCount),
Stage: "executed",
OccurredAt: timeToPtr(time.Now().UTC()),
})
}
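
// reserveScheduledExecutor polls the registry until it can reserve one
// execution slot for jobType, giving up at the policy's execution timeout
// (ten minutes when the timeout is unset) or when the plugin shuts down.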
func (r *Plugin) reserveScheduledExecutor(
jobType string,
policy schedulerPolicy,
) (*WorkerSession, func(), error) {
deadline := time.Now().Add(policy.ExecutionTimeout)
if policy.ExecutionTimeout <= 0 {
deadline = time.Now().Add(10 * time.Minute) // Default cap
}
for {
select {
case <-r.shutdownCh:
return nil, nil, fmt.Errorf("plugin is shutting down")
default:
}
if time.Now().After(deadline) {
return nil, nil, fmt.Errorf("timed out waiting for executor capacity for %s", jobType)
}
executors, err := r.registry.ListExecutors(jobType)
if err != nil {
if !waitForShutdownOrTimer(r.shutdownCh, policy.ExecutorReserveBackoff) {
return nil, nil, fmt.Errorf("plugin is shutting down")
}
continue
}
for _, executor := range executors {
release, ok := r.tryReserveExecutorCapacity(executor, jobType, policy)
if !ok {
continue
}
return executor, release, nil
}
if !waitForShutdownOrTimer(r.shutdownCh, policy.ExecutorReserveBackoff) {
return nil, nil, fmt.Errorf("plugin is shutting down")
}
}
}
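
// tryReserveExecutorCapacity reserves one slot on the executor when its
// heartbeat usage plus existing scheduler reservations stay under the
// per-worker limit; the returned release func must be called exactly once.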
func (r *Plugin) tryReserveExecutorCapacity(
executor *WorkerSession,
jobType string,
policy schedulerPolicy,
) (func(), bool) {
if executor == nil || strings.TrimSpace(executor.WorkerID) == "" {
return nil, false
}
limit := schedulerWorkerExecutionLimit(executor, jobType, policy)
if limit <= 0 {
return nil, false
}
heartbeatUsed := 0
if executor.Heartbeat != nil && executor.Heartbeat.ExecutionSlotsUsed > 0 {
heartbeatUsed = int(executor.Heartbeat.ExecutionSlotsUsed)
}
workerID := strings.TrimSpace(executor.WorkerID)
r.schedulerExecMu.Lock()
reserved := r.schedulerExecReservations[workerID]
if heartbeatUsed+reserved >= limit {
r.schedulerExecMu.Unlock()
return nil, false
}
r.schedulerExecReservations[workerID] = reserved + 1
r.schedulerExecMu.Unlock()
release := func() {
r.releaseExecutorCapacity(workerID)
}
return release, true
}
func (r *Plugin) releaseExecutorCapacity(workerID string) {
workerID = strings.TrimSpace(workerID)
if workerID == "" {
return
}
r.schedulerExecMu.Lock()
defer r.schedulerExecMu.Unlock()
current := r.schedulerExecReservations[workerID]
if current <= 1 {
delete(r.schedulerExecReservations, workerID)
return
}
r.schedulerExecReservations[workerID] = current - 1
}
func schedulerWorkerExecutionLimit(executor *WorkerSession, jobType string, policy schedulerPolicy) int {
limit := policy.PerWorkerConcurrency
if limit <= 0 {
limit = defaultScheduledPerWorkerConcurrency
}
if capability := executor.Capabilities[jobType]; capability != nil && capability.MaxExecutionConcurrency > 0 {
capLimit := int(capability.MaxExecutionConcurrency)
if capLimit < limit {
limit = capLimit
}
}
if executor.Heartbeat != nil && executor.Heartbeat.ExecutionSlotsTotal > 0 {
heartbeatLimit := int(executor.Heartbeat.ExecutionSlotsTotal)
if heartbeatLimit < limit {
limit = heartbeatLimit
}
}
if limit < 0 {
return 0
}
return limit
}
func (r *Plugin) executeScheduledJobWithExecutor(
executor *WorkerSession,
job *plugin_pb.JobSpec,
clusterContext *plugin_pb.ClusterContext,
policy schedulerPolicy,
) error {
maxAttempts := policy.RetryLimit + 1
if maxAttempts < 1 {
maxAttempts = 1
}
var lastErr error
for attempt := 1; attempt <= maxAttempts; attempt++ {
select {
case <-r.shutdownCh:
return fmt.Errorf("plugin is shutting down")
default:
}
execCtx, cancel := context.WithTimeout(context.Background(), policy.ExecutionTimeout)
_, err := r.executeJobWithExecutor(execCtx, executor, job, clusterContext, int32(attempt))
cancel()
if err == nil {
return nil
}
if isExecutorAtCapacityError(err) {
return errExecutorAtCapacity
}
lastErr = err
if attempt < maxAttempts {
r.appendActivity(JobActivity{
JobID: job.JobId,
JobType: job.JobType,
Source: "admin_scheduler",
Message: fmt.Sprintf("retrying job attempt %d/%d after error: %v", attempt, maxAttempts, err),
Stage: "retry",
OccurredAt: timeToPtr(time.Now().UTC()),
})
if !waitForShutdownOrTimer(r.shutdownCh, policy.RetryBackoff) {
return fmt.Errorf("plugin is shutting down")
}
}
}
if lastErr == nil {
lastErr = fmt.Errorf("execution failed without an explicit error")
}
return lastErr
}
func (r *Plugin) shouldSkipDetectionForWaitingJobs(jobType string, policy schedulerPolicy) (bool, int, int) {
waitingCount := r.countWaitingTrackedJobs(jobType)
threshold := waitingBacklogThreshold(policy)
if threshold <= 0 {
return false, waitingCount, threshold
}
return waitingCount >= threshold, waitingCount, threshold
}
func (r *Plugin) countWaitingTrackedJobs(jobType string) int {
normalizedJobType := strings.TrimSpace(jobType)
if normalizedJobType == "" {
return 0
}
waiting := 0
r.jobsMu.RLock()
for _, job := range r.jobs {
if job == nil {
continue
}
if strings.TrimSpace(job.JobType) != normalizedJobType {
continue
}
if !isWaitingTrackedJobState(job.State) {
continue
}
waiting++
}
r.jobsMu.RUnlock()
return waiting
}
func waitingBacklogThreshold(policy schedulerPolicy) int {
concurrency := policy.ExecutionConcurrency
if concurrency <= 0 {
concurrency = defaultScheduledExecutionConcurrency
}
threshold := concurrency * defaultWaitingBacklogMultiplier
if threshold < defaultWaitingBacklogFloor {
threshold = defaultWaitingBacklogFloor
}
if policy.MaxResults > 0 && threshold > int(policy.MaxResults) {
threshold = int(policy.MaxResults)
}
return threshold
}
func isExecutorAtCapacityError(err error) bool {
if err == nil {
return false
}
if errors.Is(err, errExecutorAtCapacity) {
return true
}
return strings.Contains(strings.ToLower(err.Error()), "executor is at capacity")
}
func buildScheduledJobSpec(jobType string, proposal *plugin_pb.JobProposal, index int) *plugin_pb.JobSpec {
now := timestamppb.Now()
jobID := fmt.Sprintf("%s-scheduled-%d-%d", jobType, now.AsTime().UnixNano(), index)
job := &plugin_pb.JobSpec{
JobId: jobID,
JobType: jobType,
Priority: plugin_pb.JobPriority_JOB_PRIORITY_NORMAL,
Parameters: map[string]*plugin_pb.ConfigValue{},
Labels: map[string]string{},
CreatedAt: now,
ScheduledAt: now,
}
if proposal == nil {
return job
}
if proposal.JobType != "" {
job.JobType = proposal.JobType
}
job.Summary = proposal.Summary
job.Detail = proposal.Detail
if proposal.Priority != plugin_pb.JobPriority_JOB_PRIORITY_UNSPECIFIED {
job.Priority = proposal.Priority
}
job.DedupeKey = proposal.DedupeKey
job.Parameters = CloneConfigValueMap(proposal.Parameters)
if proposal.Labels != nil {
job.Labels = make(map[string]string, len(proposal.Labels))
for k, v := range proposal.Labels {
job.Labels[k] = v
}
}
if proposal.NotBefore != nil {
job.ScheduledAt = proposal.NotBefore
}
return job
}
func durationFromSeconds(seconds int32, defaultValue time.Duration) time.Duration {
if seconds <= 0 {
return defaultValue
}
return time.Duration(seconds) * time.Second
}
func secondsFromDuration(duration time.Duration) int32 {
if duration <= 0 {
return 0
}
return int32(duration / time.Second)
}
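
// waitForShutdownOrTimer sleeps for duration and reports true, or reports
// false if shutdown closes first. A non-positive duration returns true
// immediately.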
func waitForShutdownOrTimer(shutdown <-chan struct{}, duration time.Duration) bool {
if duration <= 0 {
return true
}
timer := time.NewTimer(duration)
defer timer.Stop()
select {
case <-shutdown:
return false
case <-timer.C:
return true
}
}
func (r *Plugin) filterProposalsWithActiveJobs(jobType string, proposals []*plugin_pb.JobProposal) ([]*plugin_pb.JobProposal, int) {
if len(proposals) == 0 {
return proposals, 0
}
activeKeys := make(map[string]struct{})
r.jobsMu.RLock()
for _, job := range r.jobs {
if job == nil {
continue
}
if strings.TrimSpace(job.JobType) != strings.TrimSpace(jobType) {
continue
}
if !isActiveTrackedJobState(job.State) {
continue
}
key := strings.TrimSpace(job.DedupeKey)
if key == "" {
key = strings.TrimSpace(job.JobID)
}
if key == "" {
continue
}
activeKeys[key] = struct{}{}
}
r.jobsMu.RUnlock()
if len(activeKeys) == 0 {
return proposals, 0
}
filtered := make([]*plugin_pb.JobProposal, 0, len(proposals))
skipped := 0
for _, proposal := range proposals {
if proposal == nil {
continue
}
key := proposalExecutionKey(proposal)
if key != "" {
if _, exists := activeKeys[key]; exists {
skipped++
continue
}
}
filtered = append(filtered, proposal)
}
return filtered, skipped
}
func proposalExecutionKey(proposal *plugin_pb.JobProposal) string {
if proposal == nil {
return ""
}
key := strings.TrimSpace(proposal.DedupeKey)
if key != "" {
return key
}
return strings.TrimSpace(proposal.ProposalId)
}
func isActiveTrackedJobState(state string) bool {
normalized := strings.ToLower(strings.TrimSpace(state))
switch normalized {
case "pending", "assigned", "running", "in_progress", "job_state_pending", "job_state_assigned", "job_state_running":
return true
default:
return false
}
}
func isWaitingTrackedJobState(state string) bool {
normalized := strings.ToLower(strings.TrimSpace(state))
return normalized == "pending" || normalized == "job_state_pending"
}
func (r *Plugin) filterScheduledProposals(proposals []*plugin_pb.JobProposal) []*plugin_pb.JobProposal {
filtered := make([]*plugin_pb.JobProposal, 0, len(proposals))
seenInRun := make(map[string]struct{}, len(proposals))
for _, proposal := range proposals {
if proposal == nil {
continue
}
key := proposal.DedupeKey
if key == "" {
key = proposal.ProposalId
}
if key == "" {
filtered = append(filtered, proposal)
continue
}
if _, exists := seenInRun[key]; exists {
continue
}
seenInRun[key] = struct{}{}
filtered = append(filtered, proposal)
}
return filtered
}