seaweedFS/weed/admin/plugin/plugin_scheduler_test.go
Chris Lu 8ec9ff4a12 Refactor plugin system and migrate worker runtime (#8369)
* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fall back to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric and color-only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption
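  The point of buffered rendering: writing a template directly to the ResponseWriter can fail partway through, leaving a truncated 200 response. A minimal sketch of the approach, assuming an html/template view (renderPage and pageTemplate are illustrative names, not the actual SeaweedFS handlers):

  ```go
  package web

  import (
  	"bytes"
  	"html/template"
  	"net/http"
  )

  var pageTemplate = template.Must(template.New("page").Parse(`<h1>{{.Title}}</h1>`))

  func renderPage(w http.ResponseWriter, data any) {
  	// Render into a buffer first: if execution fails halfway, nothing has
  	// reached the client yet, so a clean error response is still possible.
  	var buf bytes.Buffer
  	if err := pageTemplate.Execute(&buf, data); err != nil {
  		http.Error(w, "template error", http.StatusInternalServerError)
  		return
  	}
  	w.Header().Set("Content-Type", "text/html; charset=utf-8")
  	_, _ = buf.WriteTo(w)
  }
  ```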

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
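  A debounced flusher collapses bursts of state changes into a single disk write. The following sketch shows the usual timer-reset pattern; the Debouncer type and its names are assumptions, not the actual SeaweedFS implementation:

  ```go
  package persist

  import (
  	"sync"
  	"time"
  )

  // Debouncer coalesces repeated Mark() calls into one flush per quiet window.
  type Debouncer struct {
  	mu    sync.Mutex
  	timer *time.Timer
  	delay time.Duration
  	flush func()
  }

  func NewDebouncer(delay time.Duration, flush func()) *Debouncer {
  	return &Debouncer{delay: delay, flush: flush}
  }

  // Mark schedules a flush after the debounce window, restarting the window
  // if a flush is already pending.
  func (d *Debouncer) Mark() {
  	d.mu.Lock()
  	defer d.mu.Unlock()
  	if d.timer != nil {
  		d.timer.Stop()
  	}
  	d.timer = time.AfterFunc(d.delay, d.flush)
  }
  ```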

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support
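  The motivation for this refactor: encoding/json's omitempty never treats a struct value as empty, so a zero time.Time still serializes, while a nil *time.Time is dropped. A runnable demonstration:

  ```go
  package main

  import (
  	"encoding/json"
  	"fmt"
  	"time"
  )

  type withValue struct {
  	CompletedAt time.Time `json:"completed_at,omitempty"`
  }

  type withPointer struct {
  	CompletedAt *time.Time `json:"completed_at,omitempty"`
  }

  func main() {
  	a, _ := json.Marshal(withValue{})
  	b, _ := json.Marshal(withPointer{})
  	fmt.Println(string(a)) // {"completed_at":"0001-01-01T00:00:00Z"}
  	fmt.Println(string(b)) // {}
  }
  ```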

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
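  The two mechanisms above, the recover()-based safe send and the WaitGroup-synchronized shutdown, fit the following sketch. safeSendCh is the name from the commit, but its actual signature in SeaweedFS is an assumption, and the Plugin struct here is pared down to just these fields:

  ```go
  package plugin

  import "sync"

  // safeSendCh converts the "send on closed channel" panic into a false
  // return. (A nil channel would still block forever; callers must not pass one.)
  func safeSendCh[T any](ch chan<- T, v T) (sent bool) {
  	defer func() {
  		if recover() != nil {
  			sent = false
  		}
  	}()
  	ch <- v
  	return true
  }

  type Plugin struct {
  	wg sync.WaitGroup
  }

  // spawn registers each background goroutine with the WaitGroup.
  func (p *Plugin) spawn(fn func()) {
  	p.wg.Add(1)
  	go func() {
  		defer p.wg.Done()
  		fn()
  	}()
  }

  // Shutdown blocks until every spawned goroutine has drained.
  func (p *Plugin) Shutdown() {
  	p.wg.Wait()
  }
  ```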

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
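  The atomic-write fix follows the standard temp-file-plus-rename recipe. This sketch (the real atomicWriteFile may differ in detail) also creates parent directories first, per the second bullet:

  ```go
  package store

  import (
  	"os"
  	"path/filepath"
  )

  func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
  	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
  		return err
  	}
  	// Write to a temp file in the same directory so the final rename stays
  	// on one filesystem and readers never observe a partial file.
  	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
  	if err != nil {
  		return err
  	}
  	defer os.Remove(tmp.Name()) // best-effort cleanup if we fail before rename
  	if _, err := tmp.Write(data); err != nil {
  		tmp.Close()
  		return err
  	}
  	if err := tmp.Sync(); err != nil {
  		tmp.Close()
  		return err
  	}
  	if err := tmp.Close(); err != nil {
  		return err
  	}
  	if err := os.Chmod(tmp.Name(), perm); err != nil {
  		return err
  	}
  	return os.Rename(tmp.Name(), path)
  }
  ```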

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Reworked plugin_monitor_test.go to robustly handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
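  Alert no. 283 concerns path traversal: untrusted input used to build a filesystem path. The usual Go mitigation is to normalize the untrusted component and verify containment under a fixed root; a hedged sketch (safeJoin is illustrative, not the actual fix):

  ```go
  package store

  import (
  	"errors"
  	"path/filepath"
  	"strings"
  )

  // safeJoin resolves name under root; Clean("/"+name) neutralizes any ".."
  // segments before joining, and the prefix check rejects anything that
  // would still escape the root.
  func safeJoin(root, name string) (string, error) {
  	p := filepath.Join(root, filepath.Clean("/"+name))
  	if p != root && !strings.HasPrefix(p, root+string(filepath.Separator)) {
  		return "", errors.New("path escapes root")
  	}
  	return p, nil
  }
  ```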

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
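  The standard way to cap a request body in Go is net/http's MaxBytesReader, which errors once the limit is exceeded and arranges for the server to close the connection. A sketch of the 1 MiB cap (the actual parseProtoJSONBody wiring is assumed here):

  ```go
  package api

  import (
  	"io"
  	"net/http"
  )

  const maxBodyBytes = 1 << 20 // 1 MiB

  func readLimitedBody(w http.ResponseWriter, r *http.Request) ([]byte, error) {
  	r.Body = http.MaxBytesReader(w, r.Body, maxBodyBytes)
  	return io.ReadAll(r.Body) // fails with a MaxBytesError past the limit
  }
  ```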

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID
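  Consolidating to a package-level variable means the pattern compiles once at init rather than on every validation call. A sketch, where the exact expression is an assumption (the commit only names validJobTypePattern):

  ```go
  package plugin

  import "regexp"

  // Compiled once at package init rather than per call.
  var validJobTypePattern = regexp.MustCompile(`^[a-z][a-z0-9_]*$`)

  func isValidJobType(s string) bool {
  	return validJobTypePattern.MatchString(s)
  }
  ```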

* admin/plugin: fix racy Shutdown channel close with sync.Once
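  Closing a channel twice panics with "close of closed channel", so concurrent Shutdown calls need a guard. Sketch of the sync.Once fix; the field names follow the commit message, the struct is otherwise illustrative:

  ```go
  package plugin

  import "sync"

  type Plugin struct {
  	shutdownOnce sync.Once
  	shutdownCh   chan struct{}
  }

  // Shutdown is safe to call from multiple goroutines: the channel is
  // closed exactly once, no matter how many callers race here.
  func (p *Plugin) Shutdown() {
  	p.shutdownOnce.Do(func() {
  		close(p.shutdownCh)
  	})
  }
  ```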

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators
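  With OccurredAt now a *time.Time, every sort comparator must handle nil. A sketch of what a shared nil-safe helper can look like; the name matches the commit, the exact tie-breaking rules are assumed:

  ```go
  package plugin

  import "time"

  // activityLess orders concrete timestamps ascending and sorts nil
  // timestamps after all concrete ones (two nils compare as equal).
  func activityLess(a, b *time.Time) bool {
  	switch {
  	case a == nil:
  		return false
  	case b == nil:
  		return true
  	default:
  		return a.Before(*b)
  	}
  }
  ```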

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2026-02-18 13:42:41 -08:00

584 lines
16 KiB
Go

package plugin

import (
	"fmt"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
func TestLoadSchedulerPolicyUsesAdminConfig(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	err = pluginSvc.SaveJobTypeConfig(&plugin_pb.PersistedJobTypeConfig{
		JobType: "vacuum",
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{
			Enabled:                       true,
			DetectionIntervalSeconds:      30,
			DetectionTimeoutSeconds:       20,
			MaxJobsPerDetection:           123,
			GlobalExecutionConcurrency:    5,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    4,
			RetryBackoffSeconds:           7,
		},
	})
	if err != nil {
		t.Fatalf("SaveJobTypeConfig: %v", err)
	}

	policy, enabled, err := pluginSvc.loadSchedulerPolicy("vacuum")
	if err != nil {
		t.Fatalf("loadSchedulerPolicy: %v", err)
	}
	if !enabled {
		t.Fatalf("expected enabled policy")
	}
	if policy.MaxResults != 123 {
		t.Fatalf("unexpected max results: got=%d", policy.MaxResults)
	}
	if policy.ExecutionConcurrency != 5 {
		t.Fatalf("unexpected global concurrency: got=%d", policy.ExecutionConcurrency)
	}
	if policy.PerWorkerConcurrency != 2 {
		t.Fatalf("unexpected per-worker concurrency: got=%d", policy.PerWorkerConcurrency)
	}
	if policy.RetryLimit != 4 {
		t.Fatalf("unexpected retry limit: got=%d", policy.RetryLimit)
	}
}
func TestLoadSchedulerPolicyUsesDescriptorDefaultsWhenConfigMissing(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	err = pluginSvc.store.SaveDescriptor("ec", &plugin_pb.JobTypeDescriptor{
		JobType: "ec",
		AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
			Enabled:                       true,
			DetectionIntervalSeconds:      60,
			DetectionTimeoutSeconds:       25,
			MaxJobsPerDetection:           30,
			GlobalExecutionConcurrency:    4,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    3,
			RetryBackoffSeconds:           6,
		},
	})
	if err != nil {
		t.Fatalf("SaveDescriptor: %v", err)
	}

	policy, enabled, err := pluginSvc.loadSchedulerPolicy("ec")
	if err != nil {
		t.Fatalf("loadSchedulerPolicy: %v", err)
	}
	if !enabled {
		t.Fatalf("expected enabled policy from descriptor defaults")
	}
	if policy.MaxResults != 30 {
		t.Fatalf("unexpected max results: got=%d", policy.MaxResults)
	}
	if policy.ExecutionConcurrency != 4 {
		t.Fatalf("unexpected global concurrency: got=%d", policy.ExecutionConcurrency)
	}
	if policy.PerWorkerConcurrency != 2 {
		t.Fatalf("unexpected per-worker concurrency: got=%d", policy.PerWorkerConcurrency)
	}
}
func TestReserveScheduledExecutorRespectsPerWorkerLimit(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 4},
		},
	})
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 2},
		},
	})

	policy := schedulerPolicy{
		PerWorkerConcurrency:   1,
		ExecutorReserveBackoff: time.Millisecond,
	}
	executor1, release1, err := pluginSvc.reserveScheduledExecutor("balance", policy)
	if err != nil {
		t.Fatalf("reserve executor 1: %v", err)
	}
	defer release1()
	executor2, release2, err := pluginSvc.reserveScheduledExecutor("balance", policy)
	if err != nil {
		t.Fatalf("reserve executor 2: %v", err)
	}
	defer release2()
	if executor1.WorkerID == executor2.WorkerID {
		t.Fatalf("expected different executors due to per-worker limit, got same worker %s", executor1.WorkerID)
	}
}
func TestFilterScheduledProposalsDedupe(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	proposals := []*plugin_pb.JobProposal{
		{ProposalId: "p1", DedupeKey: "d1"},
		{ProposalId: "p2", DedupeKey: "d1"}, // same dedupe key
		{ProposalId: "p3", DedupeKey: "d3"},
		{ProposalId: "p3"}, // fallback dedupe by proposal id
		{ProposalId: "p4"},
		{ProposalId: "p4"}, // same proposal id, no dedupe key
	}
	filtered := pluginSvc.filterScheduledProposals(proposals)
	if len(filtered) != 4 {
		t.Fatalf("unexpected filtered size: got=%d want=4", len(filtered))
	}
	// Dedupe state must not leak across runs: a second pass over the same
	// proposals should again yield four entries.
	filtered2 := pluginSvc.filterScheduledProposals(proposals)
	if len(filtered2) != 4 {
		t.Fatalf("expected second run dedupe to be per-run only, got=%d", len(filtered2))
	}
}

func TestBuildScheduledJobSpecDoesNotReuseProposalID(t *testing.T) {
	t.Parallel()
	proposal := &plugin_pb.JobProposal{
		ProposalId: "vacuum-2",
		DedupeKey:  "vacuum:2",
		JobType:    "vacuum",
	}
	jobA := buildScheduledJobSpec("vacuum", proposal, 0)
	jobB := buildScheduledJobSpec("vacuum", proposal, 1)
	if jobA.JobId == proposal.ProposalId {
		t.Fatalf("scheduled job id must not reuse proposal id: %s", jobA.JobId)
	}
	if jobB.JobId == proposal.ProposalId {
		t.Fatalf("scheduled job id must not reuse proposal id: %s", jobB.JobId)
	}
	if jobA.JobId == jobB.JobId {
		t.Fatalf("scheduled job ids must be unique across jobs: %s", jobA.JobId)
	}
}
func TestFilterProposalsWithActiveJobs(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	pluginSvc.trackExecutionStart("req-1", "worker-a", &plugin_pb.JobSpec{
		JobId:     "job-1",
		JobType:   "vacuum",
		DedupeKey: "vacuum:k1",
	}, 1)
	pluginSvc.trackExecutionStart("req-2", "worker-b", &plugin_pb.JobSpec{
		JobId:   "job-2",
		JobType: "vacuum",
	}, 1)
	pluginSvc.trackExecutionQueued(&plugin_pb.JobSpec{
		JobId:     "job-3",
		JobType:   "vacuum",
		DedupeKey: "vacuum:k4",
	})

	// The first three proposals collide with active jobs (by dedupe key, by
	// job id, and by queued dedupe key respectively); the last two survive.
	filtered, skipped := pluginSvc.filterProposalsWithActiveJobs("vacuum", []*plugin_pb.JobProposal{
		{ProposalId: "proposal-1", JobType: "vacuum", DedupeKey: "vacuum:k1"},
		{ProposalId: "job-2", JobType: "vacuum"},
		{ProposalId: "proposal-2b", JobType: "vacuum", DedupeKey: "vacuum:k4"},
		{ProposalId: "proposal-3", JobType: "vacuum", DedupeKey: "vacuum:k3"},
		{ProposalId: "proposal-4", JobType: "balance", DedupeKey: "balance:k1"},
	})
	if skipped != 3 {
		t.Fatalf("unexpected skipped count: got=%d want=3", skipped)
	}
	if len(filtered) != 2 {
		t.Fatalf("unexpected filtered size: got=%d want=2", len(filtered))
	}
	if filtered[0].ProposalId != "proposal-3" || filtered[1].ProposalId != "proposal-4" {
		t.Fatalf("unexpected filtered proposals: got=%s,%s", filtered[0].ProposalId, filtered[1].ProposalId)
	}
}
func TestReserveScheduledExecutorTimesOutWhenNoExecutor(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	policy := schedulerPolicy{
		ExecutionTimeout:       30 * time.Millisecond,
		ExecutorReserveBackoff: 5 * time.Millisecond,
		PerWorkerConcurrency:   1,
	}
	// Shut down before reserving: with no executor available, the
	// reservation must observe the shutdown and fail promptly rather than
	// backing off until the timeout.
	start := time.Now()
	pluginSvc.Shutdown()
	_, _, err = pluginSvc.reserveScheduledExecutor("missing-job-type", policy)
	if err == nil {
		t.Fatalf("expected reservation shutdown error")
	}
	if time.Since(start) > 50*time.Millisecond {
		t.Fatalf("reservation returned too late after shutdown: duration=%v", time.Since(start))
	}
}

func TestReserveScheduledExecutorWaitsForWorkerCapacity(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 1},
		},
	})
	policy := schedulerPolicy{
		ExecutionTimeout:       time.Second,
		PerWorkerConcurrency:   8,
		ExecutorReserveBackoff: 5 * time.Millisecond,
	}
	_, release1, err := pluginSvc.reserveScheduledExecutor("balance", policy)
	if err != nil {
		t.Fatalf("reserve executor 1: %v", err)
	}
	defer release1()

	type reserveResult struct {
		err error
	}
	secondReserveCh := make(chan reserveResult, 1)
	go func() {
		_, release2, reserveErr := pluginSvc.reserveScheduledExecutor("balance", policy)
		if release2 != nil {
			release2()
		}
		secondReserveCh <- reserveResult{err: reserveErr}
	}()
	select {
	case result := <-secondReserveCh:
		t.Fatalf("expected second reservation to wait for capacity, got=%v", result.err)
	case <-time.After(25 * time.Millisecond):
		// Expected: still waiting.
	}
	release1()
	select {
	case result := <-secondReserveCh:
		if result.err != nil {
			t.Fatalf("second reservation error: %v", result.err)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("second reservation did not acquire after capacity release")
	}
}
func TestShouldSkipDetectionForWaitingJobs(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	policy := schedulerPolicy{
		ExecutionConcurrency: 2,
		MaxResults:           100,
	}
	threshold := waitingBacklogThreshold(policy)
	if threshold <= 0 {
		t.Fatalf("expected positive waiting threshold")
	}
	for i := 0; i < threshold; i++ {
		pluginSvc.trackExecutionQueued(&plugin_pb.JobSpec{
			JobId:     fmt.Sprintf("job-waiting-%d", i),
			JobType:   "vacuum",
			DedupeKey: fmt.Sprintf("vacuum:%d", i),
		})
	}
	skip, waitingCount, waitingThreshold := pluginSvc.shouldSkipDetectionForWaitingJobs("vacuum", policy)
	if !skip {
		t.Fatalf("expected detection to skip when waiting backlog reaches threshold")
	}
	if waitingCount != threshold {
		t.Fatalf("unexpected waiting count: got=%d want=%d", waitingCount, threshold)
	}
	if waitingThreshold != threshold {
		t.Fatalf("unexpected waiting threshold: got=%d want=%d", waitingThreshold, threshold)
	}
}

func TestWaitingBacklogThresholdHonorsMaxResultsCap(t *testing.T) {
	t.Parallel()
	policy := schedulerPolicy{
		ExecutionConcurrency: 8,
		MaxResults:           6,
	}
	threshold := waitingBacklogThreshold(policy)
	if threshold != 6 {
		t.Fatalf("expected threshold to be capped by max results, got=%d", threshold)
	}
}
func TestListSchedulerStatesIncludesPolicyAndState(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	const jobType = "vacuum"
	err = pluginSvc.SaveJobTypeConfig(&plugin_pb.PersistedJobTypeConfig{
		JobType: jobType,
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{
			Enabled:                       true,
			DetectionIntervalSeconds:      45,
			DetectionTimeoutSeconds:       30,
			MaxJobsPerDetection:           80,
			GlobalExecutionConcurrency:    3,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    1,
			RetryBackoffSeconds:           9,
		},
	})
	if err != nil {
		t.Fatalf("SaveJobTypeConfig: %v", err)
	}
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, CanExecute: true},
		},
	})

	nextDetectionAt := time.Now().UTC().Add(2 * time.Minute).Round(time.Second)
	pluginSvc.schedulerMu.Lock()
	pluginSvc.nextDetectionAt[jobType] = nextDetectionAt
	pluginSvc.detectionInFlight[jobType] = true
	pluginSvc.schedulerMu.Unlock()

	states, err := pluginSvc.ListSchedulerStates()
	if err != nil {
		t.Fatalf("ListSchedulerStates: %v", err)
	}
	state := findSchedulerState(states, jobType)
	if state == nil {
		t.Fatalf("missing scheduler state for %s", jobType)
	}
	if !state.Enabled {
		t.Fatalf("expected enabled scheduler state")
	}
	if state.PolicyError != "" {
		t.Fatalf("unexpected policy error: %s", state.PolicyError)
	}
	if !state.DetectionInFlight {
		t.Fatalf("expected detection in flight")
	}
	if state.NextDetectionAt == nil {
		t.Fatalf("expected next detection time")
	}
	if state.NextDetectionAt.Unix() != nextDetectionAt.Unix() {
		t.Fatalf("unexpected next detection time: got=%v want=%v", state.NextDetectionAt, nextDetectionAt)
	}
	if state.DetectionIntervalSeconds != 45 {
		t.Fatalf("unexpected detection interval: got=%d", state.DetectionIntervalSeconds)
	}
	if state.DetectionTimeoutSeconds != 30 {
		t.Fatalf("unexpected detection timeout: got=%d", state.DetectionTimeoutSeconds)
	}
	if state.ExecutionTimeoutSeconds != 90 {
		t.Fatalf("unexpected execution timeout: got=%d", state.ExecutionTimeoutSeconds)
	}
	if state.MaxJobsPerDetection != 80 {
		t.Fatalf("unexpected max jobs per detection: got=%d", state.MaxJobsPerDetection)
	}
	if state.GlobalExecutionConcurrency != 3 {
		t.Fatalf("unexpected global execution concurrency: got=%d", state.GlobalExecutionConcurrency)
	}
	if state.PerWorkerExecutionConcurrency != 2 {
		t.Fatalf("unexpected per worker execution concurrency: got=%d", state.PerWorkerExecutionConcurrency)
	}
	if state.RetryLimit != 1 {
		t.Fatalf("unexpected retry limit: got=%d", state.RetryLimit)
	}
	if state.RetryBackoffSeconds != 9 {
		t.Fatalf("unexpected retry backoff: got=%d", state.RetryBackoffSeconds)
	}
	if !state.DetectorAvailable || state.DetectorWorkerID != "worker-a" {
		t.Fatalf("unexpected detector assignment: available=%v worker=%s", state.DetectorAvailable, state.DetectorWorkerID)
	}
	if state.ExecutorWorkerCount != 1 {
		t.Fatalf("unexpected executor worker count: got=%d", state.ExecutorWorkerCount)
	}
}
func TestListSchedulerStatesShowsDisabledWhenNoPolicy(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	const jobType = "balance"
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, CanExecute: true},
		},
	})
	states, err := pluginSvc.ListSchedulerStates()
	if err != nil {
		t.Fatalf("ListSchedulerStates: %v", err)
	}
	state := findSchedulerState(states, jobType)
	if state == nil {
		t.Fatalf("missing scheduler state for %s", jobType)
	}
	if state.Enabled {
		t.Fatalf("expected disabled scheduler state")
	}
	if state.PolicyError != "" {
		t.Fatalf("unexpected policy error: %s", state.PolicyError)
	}
	if !state.DetectorAvailable || state.DetectorWorkerID != "worker-b" {
		t.Fatalf("unexpected detector details: available=%v worker=%s", state.DetectorAvailable, state.DetectorWorkerID)
	}
	if state.ExecutorWorkerCount != 1 {
		t.Fatalf("unexpected executor worker count: got=%d", state.ExecutorWorkerCount)
	}
}

// findSchedulerState returns a pointer into states for the given job type,
// or nil when absent.
func findSchedulerState(states []SchedulerJobTypeState, jobType string) *SchedulerJobTypeState {
	for i := range states {
		if states[i].JobType == jobType {
			return &states[i]
		}
	}
	return nil
}
func TestPickDetectorPrefersLeasedWorker(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true},
		},
	})
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true},
		},
	})
	pluginSvc.setDetectorLease("vacuum", "worker-b")
	detector, err := pluginSvc.pickDetector("vacuum")
	if err != nil {
		t.Fatalf("pickDetector: %v", err)
	}
	if detector.WorkerID != "worker-b" {
		t.Fatalf("expected leased detector worker-b, got=%s", detector.WorkerID)
	}
}

func TestPickDetectorReassignsWhenLeaseIsStale(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()

	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true},
		},
	})
	// The lease points at a worker that never registered, so the pick must
	// fall back to a live detector and update the lease.
	pluginSvc.setDetectorLease("vacuum", "worker-stale")
	detector, err := pluginSvc.pickDetector("vacuum")
	if err != nil {
		t.Fatalf("pickDetector: %v", err)
	}
	if detector.WorkerID != "worker-a" {
		t.Fatalf("expected reassigned detector worker-a, got=%s", detector.WorkerID)
	}
	lease := pluginSvc.getDetectorLease("vacuum")
	if lease != "worker-a" {
		t.Fatalf("expected detector lease to be updated to worker-a, got=%s", lease)
	}
}