* admin: add plugin runtime UI page and route wiring
* pb: add plugin gRPC contract and generated bindings
* admin/plugin: implement worker registry, runtime, monitoring, and config store
* admin/dash: wire plugin runtime and expose plugin workflow APIs
* command: add flags to enable plugin runtime
* admin: rename remaining plugin v2 wording to plugin
* admin/plugin: add detectable job type registry helper
* admin/plugin: add scheduled detection and dispatch orchestration
* admin/plugin: prefetch job type descriptors when workers connect
* admin/plugin: add known job type discovery API and UI
* admin/plugin: refresh design doc to match current implementation
* admin/plugin: enforce per-worker scheduler concurrency limits
* admin/plugin: use descriptor runtime defaults for scheduler policy
* admin/ui: auto-load first known plugin job type on page open
* admin/plugin: bootstrap persisted config from descriptor defaults
* admin/plugin: dedupe scheduled proposals by dedupe key
* admin/ui: add job type and state filters for plugin monitoring
* admin/ui: add per-job-type plugin activity summary
* admin/plugin: split descriptor read API from schema refresh
* admin/ui: keep plugin summary metrics global while tables are filtered
* admin/plugin: retry executor reservation before timing out
* admin/plugin: expose scheduler states for monitoring
* admin/ui: show per-job-type scheduler states in plugin monitor
* pb/plugin: rename protobuf package to plugin
* admin/plugin: rename pluginRuntime wiring to plugin
* admin/plugin: remove runtime naming from plugin APIs and UI
* admin/plugin: rename runtime files to plugin naming
* admin/plugin: persist jobs and activities for monitor recovery
* admin/plugin: lease one detector worker per job type
* admin/ui: show worker load from plugin heartbeats
* admin/plugin: skip stale workers for detector and executor picks
* plugin/worker: add plugin worker command and stream runtime scaffold
* plugin/worker: implement vacuum detect and execute handlers
* admin/plugin: document external vacuum plugin worker starter
* command: update plugin.worker help to reflect implemented flow
* command/admin: drop legacy Plugin V2 label
* plugin/worker: validate vacuum job type and respect min interval
* plugin/worker: test no-op detect when min interval not elapsed
* command/admin: document plugin.worker external process
* plugin/worker: advertise configured concurrency in hello
* command/plugin.worker: add jobType handler selection
* command/plugin.worker: test handler selection by job type
* command/plugin.worker: persist worker id in workingDir
* admin/plugin: document plugin.worker jobType and workingDir flags
* plugin/worker: support cancel request for in-flight work
* plugin/worker: test cancel request acknowledgements
* command/plugin.worker: document workingDir and jobType behavior
* plugin/worker: emit executor activity events for monitor
* plugin/worker: test executor activity builder
* admin/plugin: send last successful run in detection request
* admin/plugin: send cancel request when detect or execute context ends
* admin/plugin: document worker cancel request responsibility
* admin/handlers: expose plugin scheduler states API in no-auth mode
* admin/handlers: test plugin scheduler states route registration
* admin/plugin: keep worker id on worker-generated activity records
* admin/plugin: test worker id propagation in monitor activities
* admin/dash: always initialize plugin service
* command/admin: remove plugin enable flags and default to enabled
* admin/dash: drop pluginEnabled constructor parameter
* admin/plugin UI: stop checking plugin enabled state
* admin/plugin: remove docs for plugin enable flags
* admin/dash: remove unused plugin enabled check method
* admin/dash: fallback to in-memory plugin init when dataDir fails
* admin/plugin API: expose worker gRPC port in status
* command/plugin.worker: resolve admin gRPC port via plugin status
* split plugin UI into overview/configuration/monitoring pages
* Update layout_templ.go
* add volume_balance plugin worker handler
* wire plugin.worker CLI for volume_balance job type
* add erasure_coding plugin worker handler
* wire plugin.worker CLI for erasure_coding job type
* support multi-job handlers in plugin worker runtime
* allow plugin.worker jobType as comma-separated list
* admin/plugin UI: rename to Workers and simplify config view
* plugin worker: queue detection requests instead of capacity reject
* Update plugin_worker.go
* plugin volume_balance: remove force_move/timeout from worker config UI
* plugin erasure_coding: enforce local working dir and cleanup
* admin/plugin UI: rename admin settings to job scheduling
* admin/plugin UI: persist and robustly render detection results
* admin/plugin: record and return detection trace metadata
* admin/plugin UI: show detection process and decision trace
* plugin: surface detector decision trace as activities
* mini: start a plugin worker by default
* admin/plugin UI: split monitoring into detection and execution tabs
* plugin worker: emit detection decision trace for EC and balance
* admin workers UI: split monitoring into detection and execution pages
* plugin scheduler: skip proposals for active assigned/running jobs
* admin workers UI: add job queue tab
* plugin worker: add dummy stress detector and executor job type
* admin workers UI: reorder tabs to detection queue execution
* admin workers UI: regenerate plugin template
* plugin defaults: include dummy stress and add stress tests
* plugin dummy stress: rotate detection selections across runs
* plugin scheduler: remove cross-run proposal dedupe
* plugin queue: track pending scheduled jobs
* plugin scheduler: wait for executor capacity before dispatch
* plugin scheduler: skip detection when waiting backlog is high
* plugin: add disk-backed job detail API and persistence
* admin ui: show plugin job detail modal from job id links
* plugin: generate unique job ids instead of reusing proposal ids
* plugin worker: emit heartbeats on work state changes
* plugin registry: round-robin tied executor and detector picks
* add temporary EC overnight stress runner
* plugin job details: persist and render EC execution plans
* ec volume details: color data and parity shard badges
* shard labels: keep parity ids numeric and color-only distinction
* admin: remove legacy maintenance UI routes and templates
* admin: remove dead maintenance endpoint helpers
* Update layout_templ.go
* remove dummy_stress worker and command support
* refactor plugin UI to job-type top tabs and sub-tabs
* migrate weed worker command to plugin runtime
* remove plugin.worker command and keep worker runtime with metrics
* update helm worker args for jobType and execution flags
* set plugin scheduling defaults to global 16 and per-worker 4
* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner
* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants
* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API
* admin/handlers: implement buffered rendering to prevent response corruption
* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve
* admin/plugin: implement atomic file writes and fix run record side effects
* admin/plugin: use P prefix for parity shard labels in execution plans
* admin/plugin: enable parallel execution for cancellation tests
* admin: refactor time.Time fields to pointers for better JSON omitempty support (see the omitempty sketch after this list)
* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core
* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor
* admin/plugin: update scheduler activity tracking to use time pointers
* admin/plugin: fix time-based run history trimming after pointer refactor
* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor
* admin/view: add D/P prefixes to EC shard badges for UI consistency
* admin/plugin: use lifecycle-aware context for schema prefetching
* Update ec_volume_details_templ.go
* admin/stress: fix proposal sorting and log volume cleanup errors
* stress: refine ec stress runner with math/rand and collection name
  - Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
  - Replaced crypto/rand with a seeded math/rand PRNG for bulk payloads.
  - Added documentation for EcMinAge zero-value behavior.
  - Added logging for ignored errors in volume/shard deletion.
* admin: return internal server error for plugin store failures
  Changed the error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.
* admin: implement safe channel sends and graceful shutdown sync (safeSendCh is sketched after this list)
  - Added sync.WaitGroup to the Plugin struct to manage background goroutines.
  - Implemented a safeSendCh helper using recover() to prevent panics on closed channels.
  - Ensured Shutdown() waits for all background operations to complete.
* admin: robustify plugin monitor with nil-safe time and record init
  - Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
  - Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
  - Fixed debounced persistence to trigger an immediate write on job completion.
* admin: improve scheduler shutdown behavior and logic guards
  - Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
  - Removed a redundant nil guard in buildScheduledJobSpec.
  - Standardized WaitGroup usage for schedulerLoop.
* admin: implement deep copy for job parameters and atomic write fixes (the atomic-write pattern is sketched after this list)
  - Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
  - Ensured atomicWriteFile creates parent directories before writing.
* admin: remove unreachable branch in shard classification
  Removed an unreachable 'totalShards <= 0' check in classifyShardID, as dataShards and parityShards are already guarded.
* admin: secure UI links and use canonical shard constants
  - Added rel="noopener noreferrer" to external links for security.
  - Replaced the magic number 14 with erasure_coding.TotalShardsCount.
  - Used renderEcShardBadge for missing-shard list consistency.
* admin: stabilize plugin tests and fix regressions
  - Composed a robust plugin_monitor_test.go to handle asynchronous persistence.
  - Updated all time.Time literals to use the timeToPtr helper.
  - Added explicit Shutdown() calls in tests to synchronize with debounced writes.
  - Fixed syntax errors and orphaned struct literals in tests.
* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value
  Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression
  Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
* admin: finalize refinements for error handling, scheduler, and race fixes
  - Standardized HTTP 500 status codes for store failures in plugin_api.go.
  - Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
  - Fixed a race condition in safeSendDetectionComplete by extracting the channel under lock.
  - Implemented deep copy for JobActivity details.
  - Used the defaultDirPerm constant in atomicWriteFile.
* test(ec): migrate admin dockertest to plugin APIs
* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors
* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures
* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID
* admin/plugin: fix racy Shutdown channel close with sync.Once
* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg
* admin/plugin: document writeProtoFiles atomicity: .pb is the source of truth, .json is human-readable only
* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators
* test/ec: check http.NewRequest errors to prevent nil req panics
* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1
* plugin(ec): raise default detection and scheduling throughput limits
* topology: include empty disks in volume list and EC capacity fallback
* topology: remove hard 10-task cap for detection planning
* Update ec_volume_details_templ.go
* adjust default
* fix tests

--------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
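A few of the fixes above rely on small, reusable Go patterns worth spelling out. First, the safeSendCh helper described in "implement safe channel sends and graceful shutdown sync" guards sends that can race a channel close. A minimal sketch of the recover()-based pattern (the names here are illustrative, not the actual SeaweedFS identifiers):

```go
package main

import "fmt"

// safeSend attempts ch <- v and reports whether the send happened.
// A send on a closed channel panics; recover() converts that panic
// into a false return instead of crashing the goroutine.
func safeSend[T any](ch chan<- T, v T) (sent bool) {
	defer func() {
		if recover() != nil {
			sent = false // channel was closed mid-send
		}
	}()
	ch <- v
	return true
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(safeSend(ch, 1)) // true
	close(ch)
	fmt.Println(safeSend(ch, 2)) // false: panic on closed channel recovered
}
```

Second, the atomic-write commits ("implement atomic file writes", "Ensured atomicWriteFile creates parent directories before writing") follow the standard temp-file-plus-rename recipe. A sketch under the assumption that the real helper behaves similarly:

```go
package main

import (
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to path so readers never observe a partial
// file: write to a temp file in the same directory, sync, then rename over
// the destination (rename is atomic on POSIX filesystems).
func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0o755); err != nil { // create parents first
		return err
	}
	tmp, err := os.CreateTemp(dir, filepath.Base(path)+".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the rename succeeds
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}
```

Finally, the time.Time-to-pointer refactor exists because encoding/json's omitempty never treats a struct value such as time.Time{} as empty, so a zero timestamp is always serialized; a nil *time.Time, by contrast, is omitted. Illustrative field names, not the actual structs:

```go
package main

import "time"

// JobRecord is an illustrative record type, not the actual SeaweedFS struct.
type JobRecord struct {
	// A value-typed time.Time{} would still marshal as
	// "0001-01-01T00:00:00Z"; a nil pointer is dropped entirely.
	CompletedAt *time.Time `json:"completed_at,omitempty"`
}
```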
package pluginworker

import (
	"context"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// TestWorkerBuildHelloUsesConfiguredConcurrency verifies that worker-level
// concurrency options override the limits advertised by the handler
// capability in the hello message.
func TestWorkerBuildHelloUsesConfiguredConcurrency(t *testing.T) {
	handler := &testJobHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:                 "vacuum",
			CanDetect:               true,
			CanExecute:              true,
			MaxDetectionConcurrency: 99,
			MaxExecutionConcurrency: 88,
		},
		descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
	}

	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxDetectionConcurrency: 3,
		MaxExecutionConcurrency: 4,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	hello := worker.buildHello()
	if hello == nil || len(hello.Capabilities) != 1 {
		t.Fatalf("expected one capability in hello")
	}
	capability := hello.Capabilities[0]
	if capability.MaxDetectionConcurrency != 3 {
		t.Fatalf("expected max_detection_concurrency=3, got=%d", capability.MaxDetectionConcurrency)
	}
	if capability.MaxExecutionConcurrency != 4 {
		t.Fatalf("expected max_execution_concurrency=4, got=%d", capability.MaxExecutionConcurrency)
	}
	if capability.JobType != "vacuum" {
		t.Fatalf("expected job type vacuum, got=%q", capability.JobType)
	}
}

// TestWorkerBuildHelloIncludesMultipleCapabilities verifies that a worker with
// multiple handlers advertises one capability per job type, each carrying the
// worker-level concurrency limits.
func TestWorkerBuildHelloIncludesMultipleCapabilities(t *testing.T) {
	worker, err := NewWorker(WorkerOptions{
		AdminServer:    "localhost:23646",
		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handlers: []JobHandler{
			&testJobHandler{
				capability: &plugin_pb.JobTypeCapability{JobType: "vacuum", CanDetect: true, CanExecute: true},
				descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
			},
			&testJobHandler{
				capability: &plugin_pb.JobTypeCapability{JobType: "volume_balance", CanDetect: true, CanExecute: true},
				descriptor: &plugin_pb.JobTypeDescriptor{JobType: "volume_balance"},
			},
		},
		MaxDetectionConcurrency: 2,
		MaxExecutionConcurrency: 3,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	hello := worker.buildHello()
	if hello == nil || len(hello.Capabilities) != 2 {
		t.Fatalf("expected two capabilities in hello")
	}

	found := map[string]bool{}
	for _, capability := range hello.Capabilities {
		found[capability.JobType] = true
		if capability.MaxDetectionConcurrency != 2 {
			t.Fatalf("expected max_detection_concurrency=2, got=%d", capability.MaxDetectionConcurrency)
		}
		if capability.MaxExecutionConcurrency != 3 {
			t.Fatalf("expected max_execution_concurrency=3, got=%d", capability.MaxExecutionConcurrency)
		}
	}
	if !found["vacuum"] || !found["volume_balance"] {
		t.Fatalf("expected capabilities for vacuum and volume_balance, got=%v", found)
	}
}

// TestWorkerCancelWorkByTargetID verifies that in-flight work can be canceled
// by either its request id or its job id, and that unknown targets are
// rejected.
func TestWorkerCancelWorkByTargetID(t *testing.T) {
	worker, err := NewWorker(WorkerOptions{
		AdminServer:    "localhost:23646",
		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler: &testJobHandler{
			capability: &plugin_pb.JobTypeCapability{JobType: "vacuum"},
			descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		},
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	worker.setWorkCancel(cancel, "request-1", "job-1")

	if !worker.cancelWork("request-1") {
		t.Fatalf("expected cancel by request id to succeed")
	}
	select {
	case <-ctx.Done():
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("expected context to be canceled")
	}

	if !worker.cancelWork("job-1") {
		t.Fatalf("expected cancel by job id to succeed")
	}
	if worker.cancelWork("unknown-target") {
		t.Fatalf("expected cancel unknown target to fail")
	}
}

// TestWorkerHandleCancelRequestAck verifies that a CancelRequest for a
// registered target invokes the cancel function and is acknowledged as
// accepted, while a CancelRequest for a missing target is rejected.
func TestWorkerHandleCancelRequestAck(t *testing.T) {
	worker, err := NewWorker(WorkerOptions{
		AdminServer:    "localhost:23646",
		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler: &testJobHandler{
			capability: &plugin_pb.JobTypeCapability{JobType: "vacuum"},
			descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		},
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	canceled := false
	worker.setWorkCancel(func() { canceled = true }, "job-42")

	var response *plugin_pb.WorkerToAdminMessage
	ok := worker.handleAdminMessageForTest(&plugin_pb.AdminToWorkerMessage{
		RequestId: "cancel-req-1",
		Body: &plugin_pb.AdminToWorkerMessage_CancelRequest{
			CancelRequest: &plugin_pb.CancelRequest{TargetId: "job-42"},
		},
	}, func(msg *plugin_pb.WorkerToAdminMessage) bool {
		response = msg
		return true
	})
	if !ok {
		t.Fatalf("expected send callback to be invoked")
	}
	if !canceled {
		t.Fatalf("expected registered work cancel function to be called")
	}
	if response == nil || response.GetAcknowledge() == nil || !response.GetAcknowledge().Accepted {
		t.Fatalf("expected accepted acknowledge response, got=%+v", response)
	}

	response = nil
	ok = worker.handleAdminMessageForTest(&plugin_pb.AdminToWorkerMessage{
		RequestId: "cancel-req-2",
		Body: &plugin_pb.AdminToWorkerMessage_CancelRequest{
			CancelRequest: &plugin_pb.CancelRequest{TargetId: "missing"},
		},
	}, func(msg *plugin_pb.WorkerToAdminMessage) bool {
		response = msg
		return true
	})
	if !ok {
		t.Fatalf("expected send callback to be invoked")
	}
	if response == nil || response.GetAcknowledge() == nil || response.GetAcknowledge().Accepted {
		t.Fatalf("expected rejected acknowledge for missing target, got=%+v", response)
	}
}

// TestWorkerSchemaRequestRequiresJobTypeWhenMultipleHandlers verifies that a
// config schema request without an explicit job type fails when the worker
// hosts more than one handler.
func TestWorkerSchemaRequestRequiresJobTypeWhenMultipleHandlers(t *testing.T) {
	worker, err := NewWorker(WorkerOptions{
		AdminServer:    "localhost:23646",
		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handlers: []JobHandler{
			&testJobHandler{
				capability: &plugin_pb.JobTypeCapability{JobType: "vacuum"},
				descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
			},
			&testJobHandler{
				capability: &plugin_pb.JobTypeCapability{JobType: "erasure_coding"},
				descriptor: &plugin_pb.JobTypeDescriptor{JobType: "erasure_coding"},
			},
		},
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	var response *plugin_pb.WorkerToAdminMessage
	ok := worker.handleAdminMessageForTest(&plugin_pb.AdminToWorkerMessage{
		RequestId: "schema-req-1",
		Body: &plugin_pb.AdminToWorkerMessage_RequestConfigSchema{
			RequestConfigSchema: &plugin_pb.RequestConfigSchema{},
		},
	}, func(msg *plugin_pb.WorkerToAdminMessage) bool {
		response = msg
		return true
	})
	if !ok {
		t.Fatalf("expected send callback to be invoked")
	}
	schema := response.GetConfigSchemaResponse()
	if schema == nil || schema.Success {
		t.Fatalf("expected schema error response, got=%+v", response)
	}
}

// TestWorkerHandleDetectionQueuesWhenAtCapacity verifies that detection
// requests beyond MaxDetectionConcurrency are acknowledged and queued, and
// only run once an earlier detection releases its slot.
func TestWorkerHandleDetectionQueuesWhenAtCapacity(t *testing.T) {
	handler := &detectionQueueTestHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:    "vacuum",
			CanDetect:  true,
			CanExecute: false,
		},
		descriptor:     &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		detectEntered:  make(chan struct{}, 2),
		detectContinue: make(chan struct{}, 2),
	}

	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxDetectionConcurrency: 1,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	msgCh := make(chan *plugin_pb.WorkerToAdminMessage, 8)
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		msgCh <- msg
		return true
	}

	sendDetection := func(requestID string) {
		worker.handleAdminMessage(context.Background(), &plugin_pb.AdminToWorkerMessage{
			RequestId: requestID,
			Body: &plugin_pb.AdminToWorkerMessage_RunDetectionRequest{
				RunDetectionRequest: &plugin_pb.RunDetectionRequest{
					JobType: "vacuum",
				},
			},
		}, send)
	}

	sendDetection("detect-1")
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		ack := message.GetAcknowledge()
		return ack != nil && ack.RequestId == "detect-1" && ack.Accepted
	}, "detection acknowledge detect-1")
	<-handler.detectEntered

	sendDetection("detect-2")
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		ack := message.GetAcknowledge()
		return ack != nil && ack.RequestId == "detect-2" && ack.Accepted
	}, "detection acknowledge detect-2")

	// The second detection must stay queued while the first one holds the slot.
	select {
	case unexpected := <-msgCh:
		t.Fatalf("did not expect detection completion before slot is available, got=%+v", unexpected)
	case <-time.After(100 * time.Millisecond):
	}

	handler.detectContinue <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		complete := message.GetDetectionComplete()
		return complete != nil && complete.RequestId == "detect-1" && complete.Success
	}, "detection complete detect-1")

	<-handler.detectEntered
	handler.detectContinue <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		complete := message.GetDetectionComplete()
		return complete != nil && complete.RequestId == "detect-2" && complete.Success
	}, "detection complete detect-2")
}

// TestWorkerHeartbeatReflectsActiveDetectionLoad verifies that heartbeats
// report a used detection slot and the running work id while a detection is
// in flight, and return to idle once it completes.
func TestWorkerHeartbeatReflectsActiveDetectionLoad(t *testing.T) {
	handler := &detectionQueueTestHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:    "vacuum",
			CanDetect:  true,
			CanExecute: false,
		},
		descriptor:     &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		detectEntered:  make(chan struct{}, 1),
		detectContinue: make(chan struct{}, 1),
	}

	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxDetectionConcurrency: 1,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	msgCh := make(chan *plugin_pb.WorkerToAdminMessage, 16)
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		msgCh <- msg
		return true
	}

	requestID := "detect-heartbeat-1"
	worker.handleAdminMessage(context.Background(), &plugin_pb.AdminToWorkerMessage{
		RequestId: requestID,
		Body: &plugin_pb.AdminToWorkerMessage_RunDetectionRequest{
			RunDetectionRequest: &plugin_pb.RunDetectionRequest{
				JobType: "vacuum",
			},
		},
	}, send)

	<-handler.detectEntered

	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil &&
			heartbeat.DetectionSlotsUsed > 0 &&
			heartbeatHasRunningWork(heartbeat, requestID, plugin_pb.WorkKind_WORK_KIND_DETECTION)
	}, "active detection heartbeat")

	handler.detectContinue <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		complete := message.GetDetectionComplete()
		return complete != nil && complete.RequestId == requestID && complete.Success
	}, "detection complete")

	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil && heartbeat.DetectionSlotsUsed == 0 &&
			!heartbeatHasRunningWork(heartbeat, requestID, plugin_pb.WorkKind_WORK_KIND_DETECTION)
	}, "idle detection heartbeat")
}

// TestWorkerHeartbeatReflectsActiveExecutionLoad verifies that heartbeats
// report a used execution slot and the running job id while a job executes,
// and return to idle once it completes.
func TestWorkerHeartbeatReflectsActiveExecutionLoad(t *testing.T) {
	handler := &executionHeartbeatTestHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:    "vacuum",
			CanDetect:  false,
			CanExecute: true,
		},
		descriptor:     &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		executeEntered: make(chan struct{}, 1),
		executeDone:    make(chan struct{}, 1),
	}

	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxExecutionConcurrency: 1,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}

	msgCh := make(chan *plugin_pb.WorkerToAdminMessage, 16)
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		msgCh <- msg
		return true
	}

	requestID := "exec-heartbeat-1"
	jobID := "job-heartbeat-1"
	worker.handleAdminMessage(context.Background(), &plugin_pb.AdminToWorkerMessage{
		RequestId: requestID,
		Body: &plugin_pb.AdminToWorkerMessage_ExecuteJobRequest{
			ExecuteJobRequest: &plugin_pb.ExecuteJobRequest{
				Job: &plugin_pb.JobSpec{
					JobId:   jobID,
					JobType: "vacuum",
				},
			},
		},
	}, send)

	<-handler.executeEntered

	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil &&
			heartbeat.ExecutionSlotsUsed > 0 &&
			heartbeatHasRunningWork(heartbeat, jobID, plugin_pb.WorkKind_WORK_KIND_EXECUTION)
	}, "active execution heartbeat")

	handler.executeDone <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		completed := message.GetJobCompleted()
		return completed != nil && completed.RequestId == requestID && completed.Success
	}, "execution complete")

	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil && heartbeat.ExecutionSlotsUsed == 0 &&
			!heartbeatHasRunningWork(heartbeat, jobID, plugin_pb.WorkKind_WORK_KIND_EXECUTION)
	}, "idle execution heartbeat")
}

// testJobHandler is a minimal JobHandler stub with a fixed capability and
// descriptor and no-op Detect/Execute implementations.
type testJobHandler struct {
	capability *plugin_pb.JobTypeCapability
	descriptor *plugin_pb.JobTypeDescriptor
}

func (h *testJobHandler) Capability() *plugin_pb.JobTypeCapability {
	return h.capability
}

func (h *testJobHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return h.descriptor
}

func (h *testJobHandler) Detect(context.Context, *plugin_pb.RunDetectionRequest, DetectionSender) error {
	return nil
}

func (h *testJobHandler) Execute(context.Context, *plugin_pb.ExecuteJobRequest, ExecutionSender) error {
	return nil
}

// detectionQueueTestHandler is a JobHandler whose Detect blocks until signaled
// via detectContinue, so tests can observe queueing and heartbeat state while
// a detection is in flight.
type detectionQueueTestHandler struct {
	capability *plugin_pb.JobTypeCapability
	descriptor *plugin_pb.JobTypeDescriptor

	detectEntered  chan struct{}
	detectContinue chan struct{}
}

func (h *detectionQueueTestHandler) Capability() *plugin_pb.JobTypeCapability {
	return h.capability
}

func (h *detectionQueueTestHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return h.descriptor
}

func (h *detectionQueueTestHandler) Detect(ctx context.Context, _ *plugin_pb.RunDetectionRequest, sender DetectionSender) error {
	// Signal entry without blocking if nobody is listening.
	select {
	case h.detectEntered <- struct{}{}:
	default:
	}

	// Block until the test releases the detection or the context is canceled.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-h.detectContinue:
	}

	return sender.SendComplete(&plugin_pb.DetectionComplete{
		Success: true,
	})
}

func (h *detectionQueueTestHandler) Execute(context.Context, *plugin_pb.ExecuteJobRequest, ExecutionSender) error {
	return nil
}

// executionHeartbeatTestHandler is a JobHandler whose Execute blocks until
// signaled via executeDone, so tests can observe execution heartbeat state.
type executionHeartbeatTestHandler struct {
	capability *plugin_pb.JobTypeCapability
	descriptor *plugin_pb.JobTypeDescriptor

	executeEntered chan struct{}
	executeDone    chan struct{}
}

func (h *executionHeartbeatTestHandler) Capability() *plugin_pb.JobTypeCapability {
	return h.capability
}

func (h *executionHeartbeatTestHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return h.descriptor
}

func (h *executionHeartbeatTestHandler) Detect(context.Context, *plugin_pb.RunDetectionRequest, DetectionSender) error {
	return nil
}

func (h *executionHeartbeatTestHandler) Execute(ctx context.Context, request *plugin_pb.ExecuteJobRequest, sender ExecutionSender) error {
	// Signal entry without blocking if nobody is listening.
	select {
	case h.executeEntered <- struct{}{}:
	default:
	}

	// Block until the test releases the execution or the context is canceled.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-h.executeDone:
	}

	return sender.SendCompleted(&plugin_pb.JobCompleted{
		JobId:   request.Job.JobId,
		JobType: request.Job.JobType,
		Success: true,
	})
}

// recvWorkerMessage returns the next worker-to-admin message, failing the
// test if none arrives within two seconds.
func recvWorkerMessage(t *testing.T, msgCh <-chan *plugin_pb.WorkerToAdminMessage) *plugin_pb.WorkerToAdminMessage {
	t.Helper()
	select {
	case msg := <-msgCh:
		return msg
	case <-time.After(2 * time.Second):
		t.Fatal("timed out waiting for worker message")
		return nil
	}
}

// expectDetectionAck asserts that the message is an accepted acknowledge for
// the given request id.
func expectDetectionAck(t *testing.T, message *plugin_pb.WorkerToAdminMessage, requestID string) {
	t.Helper()
	ack := message.GetAcknowledge()
	if ack == nil {
		t.Fatalf("expected acknowledge for request %q, got=%+v", requestID, message)
	}
	if ack.RequestId != requestID {
		t.Fatalf("expected acknowledge request_id=%q, got=%q", requestID, ack.RequestId)
	}
	if !ack.Accepted {
		t.Fatalf("expected acknowledge accepted for request %q, got=%+v", requestID, ack)
	}
}

// expectDetectionCompleteSuccess asserts that the message is a successful
// detection completion for the given request id.
func expectDetectionCompleteSuccess(t *testing.T, message *plugin_pb.WorkerToAdminMessage, requestID string) {
	t.Helper()
	complete := message.GetDetectionComplete()
	if complete == nil {
		t.Fatalf("expected detection complete for request %q, got=%+v", requestID, message)
	}
	if complete.RequestId != requestID {
		t.Fatalf("expected detection complete request_id=%q, got=%q", requestID, complete.RequestId)
	}
	if !complete.Success {
		t.Fatalf("expected successful detection complete for request %q, got=%+v", requestID, complete)
	}
}

// waitForWorkerMessage drains msgCh until a message satisfies the predicate,
// failing the test with the given description if the timeout elapses first.
func waitForWorkerMessage(
	t *testing.T,
	msgCh <-chan *plugin_pb.WorkerToAdminMessage,
	predicate func(*plugin_pb.WorkerToAdminMessage) bool,
	description string,
) *plugin_pb.WorkerToAdminMessage {
	t.Helper()

	timeout := time.NewTimer(3 * time.Second)
	defer timeout.Stop()

	for {
		select {
		case message := <-msgCh:
			if predicate(message) {
				return message
			}
		case <-timeout.C:
			t.Fatalf("timed out waiting for %s", description)
			return nil
		}
	}
}

// heartbeatHasRunningWork reports whether the heartbeat lists the given work
// id with the given kind among its running work entries.
func heartbeatHasRunningWork(heartbeat *plugin_pb.WorkerHeartbeat, workID string, kind plugin_pb.WorkKind) bool {
	if heartbeat == nil || workID == "" {
		return false
	}
	for _, work := range heartbeat.RunningWork {
		if work == nil {
			continue
		}
		if work.WorkId == workID && work.Kind == kind {
			return true
		}
	}
	return false
}

// handleAdminMessageForTest wraps handleAdminMessage and reports whether the
// send callback was invoked at least once.
func (w *Worker) handleAdminMessageForTest(
	message *plugin_pb.AdminToWorkerMessage,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) bool {
	called := false
	w.handleAdminMessage(context.Background(), message, func(msg *plugin_pb.WorkerToAdminMessage) bool {
		called = true
		return send(msg)
	})
	return called
}