* admin: add plugin runtime UI page and route wiring
* pb: add plugin gRPC contract and generated bindings
* admin/plugin: implement worker registry, runtime, monitoring, and config store
* admin/dash: wire plugin runtime and expose plugin workflow APIs
* command: add flags to enable plugin runtime
* admin: rename remaining plugin v2 wording to plugin
* admin/plugin: add detectable job type registry helper
* admin/plugin: add scheduled detection and dispatch orchestration
* admin/plugin: prefetch job type descriptors when workers connect
* admin/plugin: add known job type discovery API and UI
* admin/plugin: refresh design doc to match current implementation
* admin/plugin: enforce per-worker scheduler concurrency limits
* admin/plugin: use descriptor runtime defaults for scheduler policy
* admin/ui: auto-load first known plugin job type on page open
* admin/plugin: bootstrap persisted config from descriptor defaults
* admin/plugin: dedupe scheduled proposals by dedupe key
* admin/ui: add job type and state filters for plugin monitoring
* admin/ui: add per-job-type plugin activity summary
* admin/plugin: split descriptor read API from schema refresh
* admin/ui: keep plugin summary metrics global while tables are filtered
* admin/plugin: retry executor reservation before timing out
* admin/plugin: expose scheduler states for monitoring
* admin/ui: show per-job-type scheduler states in plugin monitor
* pb/plugin: rename protobuf package to plugin
* admin/plugin: rename pluginRuntime wiring to plugin
* admin/plugin: remove runtime naming from plugin APIs and UI
* admin/plugin: rename runtime files to plugin naming
* admin/plugin: persist jobs and activities for monitor recovery
* admin/plugin: lease one detector worker per job type
* admin/ui: show worker load from plugin heartbeats
* admin/plugin: skip stale workers for detector and executor picks
* plugin/worker: add plugin worker command and stream runtime scaffold
* plugin/worker: implement vacuum detect and execute handlers
* admin/plugin: document external vacuum plugin worker starter
* command: update plugin.worker help to reflect implemented flow
* command/admin: drop legacy Plugin V2 label
* plugin/worker: validate vacuum job type and respect min interval
* plugin/worker: test no-op detect when min interval not elapsed
* command/admin: document plugin.worker external process
* plugin/worker: advertise configured concurrency in hello
* command/plugin.worker: add jobType handler selection
* command/plugin.worker: test handler selection by job type
* command/plugin.worker: persist worker id in workingDir
* admin/plugin: document plugin.worker jobType and workingDir flags
* plugin/worker: support cancel request for in-flight work
* plugin/worker: test cancel request acknowledgements
* command/plugin.worker: document workingDir and jobType behavior
* plugin/worker: emit executor activity events for monitor
* plugin/worker: test executor activity builder
* admin/plugin: send last successful run in detection request
* admin/plugin: send cancel request when detect or execute context ends
* admin/plugin: document worker cancel request responsibility
* admin/handlers: expose plugin scheduler states API in no-auth mode
* admin/handlers: test plugin scheduler states route registration
* admin/plugin: keep worker id on worker-generated activity records
* admin/plugin: test worker id propagation in monitor activities
* admin/dash: always initialize plugin service
* command/admin: remove plugin enable flags and default to enabled
* admin/dash: drop pluginEnabled constructor parameter
* admin/plugin UI: stop checking plugin enabled state
* admin/plugin: remove docs for plugin enable flags
* admin/dash: remove unused plugin enabled check method
* admin/dash: fallback to in-memory plugin init when dataDir fails
* admin/plugin API: expose worker gRPC port in status
* command/plugin.worker: resolve admin gRPC port via plugin status
* split plugin UI into overview/configuration/monitoring pages
* Update layout_templ.go
* add volume_balance plugin worker handler
* wire plugin.worker CLI for volume_balance job type
* add erasure_coding plugin worker handler
* wire plugin.worker CLI for erasure_coding job type
* support multi-job handlers in plugin worker runtime
* allow plugin.worker jobType as comma-separated list
* admin/plugin UI: rename to Workers and simplify config view
* plugin worker: queue detection requests instead of capacity reject
* Update plugin_worker.go
* plugin volume_balance: remove force_move/timeout from worker config UI
* plugin erasure_coding: enforce local working dir and cleanup
* admin/plugin UI: rename admin settings to job scheduling
* admin/plugin UI: persist and robustly render detection results
* admin/plugin: record and return detection trace metadata
* admin/plugin UI: show detection process and decision trace
* plugin: surface detector decision trace as activities
* mini: start a plugin worker by default
* admin/plugin UI: split monitoring into detection and execution tabs
* plugin worker: emit detection decision trace for EC and balance
* admin workers UI: split monitoring into detection and execution pages
* plugin scheduler: skip proposals for active assigned/running jobs
* admin workers UI: add job queue tab
* plugin worker: add dummy stress detector and executor job type
* admin workers UI: reorder tabs to detection queue execution
* admin workers UI: regenerate plugin template
* plugin defaults: include dummy stress and add stress tests
* plugin dummy stress: rotate detection selections across runs
* plugin scheduler: remove cross-run proposal dedupe
* plugin queue: track pending scheduled jobs
* plugin scheduler: wait for executor capacity before dispatch
* plugin scheduler: skip detection when waiting backlog is high
* plugin: add disk-backed job detail API and persistence
* admin ui: show plugin job detail modal from job id links
* plugin: generate unique job ids instead of reusing proposal ids
* plugin worker: emit heartbeats on work state changes
* plugin registry: round-robin tied executor and detector picks
* add temporary EC overnight stress runner
* plugin job details: persist and render EC execution plans
* ec volume details: color data and parity shard badges
* shard labels: keep parity ids numeric and color-only distinction
* admin: remove legacy maintenance UI routes and templates
* admin: remove dead maintenance endpoint helpers
* Update layout_templ.go
* remove dummy_stress worker and command support
* refactor plugin UI to job-type top tabs and sub-tabs
* migrate weed worker command to plugin runtime
* remove plugin.worker command and keep worker runtime with metrics
* update helm worker args for jobType and execution flags
* set plugin scheduling defaults to global 16 and per-worker 4
* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner
* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants
* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API
* admin/handlers: implement buffered rendering to prevent response corruption
* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve
* admin/plugin: implement atomic file writes and fix run record side effects
* admin/plugin: use P prefix for parity shard labels in execution plans
* admin/plugin: enable parallel execution for cancellation tests
* admin: refactor time.Time fields to pointers for better JSON omitempty support
* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core
* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor
* admin/plugin: update scheduler activity tracking to use time pointers
* admin/plugin: fix time-based run history trimming after pointer refactor
* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor
* admin/view: add D/P prefixes to EC shard badges for UI consistency
* admin/plugin: use lifecycle-aware context for schema prefetching
* Update ec_volume_details_templ.go
* admin/stress: fix proposal sorting and log volume cleanup errors
* stress: refine ec stress runner with math/rand and collection name
  - Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
  - Replaced crypto/rand with a seeded math/rand PRNG for bulk payloads.
  - Added documentation for EcMinAge zero-value behavior.
  - Added logging for ignored errors in volume/shard deletion.
* admin: return internal server error for plugin store failures
  Changed the error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.
* admin: implement safe channel sends and graceful shutdown sync (see the sketch after this list)
  - Added sync.WaitGroup to Plugin struct to manage background goroutines.
  - Implemented safeSendCh helper using recover() to prevent panics on closed channels.
  - Ensured Shutdown() waits for all background operations to complete.
* admin: robustify plugin monitor with nil-safe time and record init
  - Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
  - Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
  - Fixed debounced persistence to trigger an immediate write on job completion.
* admin: improve scheduler shutdown behavior and logic guards
  - Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
  - Removed redundant nil guard in buildScheduledJobSpec.
  - Standardized WaitGroup usage for schedulerLoop.
* admin: implement deep copy for job parameters and atomic write fixes
  - Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
  - Ensured atomicWriteFile creates parent directories before writing.
* admin: remove unreachable branch in shard classification
  Removed an unreachable 'totalShards <= 0' check in classifyShardID, as dataShards and parityShards are already guarded.
* admin: secure UI links and use canonical shard constants
  - Added rel="noopener noreferrer" to external links for security.
  - Replaced magic number 14 with erasure_coding.TotalShardsCount.
  - Used renderEcShardBadge for missing shard list consistency.
* admin: stabilize plugin tests and fix regressions
  - Composed a robust plugin_monitor_test.go to handle asynchronous persistence.
  - Updated all time.Time literals to use the timeToPtr helper.
  - Added explicit Shutdown() calls in tests to synchronize with debounced writes.
  - Fixed syntax errors and orphaned struct literals in tests.
* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value
  Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression
  Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
* admin: finalize refinements for error handling, scheduler, and race fixes
  - Standardized HTTP 500 status codes for store failures in plugin_api.go.
  - Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
  - Fixed race condition in safeSendDetectionComplete by extracting the channel under lock.
  - Implemented deep copy for JobActivity details.
  - Used the defaultDirPerm constant in atomicWriteFile.
* test(ec): migrate admin dockertest to plugin APIs
* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors
* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures
* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID
* admin/plugin: fix racy Shutdown channel close with sync.Once
* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg
* admin/plugin: document writeProtoFiles atomicity: .pb is the source of truth, .json is human-readable only
* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators
* test/ec: check http.NewRequest errors to prevent nil req panics
* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1
* plugin(ec): raise default detection and scheduling throughput limits
* topology: include empty disks in volume list and EC capacity fallback
* topology: remove hard 10-task cap for detection planning
* Update ec_volume_details_templ.go
* adjust default
* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
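
One commit above ("admin: implement safe channel sends and graceful shutdown sync") describes guarding channel sends against shutdown races with a recover()-based helper. A minimal sketch of what such a helper can look like; the name `safeSendCh` comes from the commit message, but the generic signature and non-blocking behavior here are assumptions, not the PR's actual implementation:

```go
// safeSendCh attempts a non-blocking send on ch and reports whether the
// value was delivered. A send on a concurrently closed channel panics;
// the deferred recover converts that panic into a false return instead
// of crashing the goroutine. (Sketch only; assumed signature.)
func safeSendCh[T any](ch chan T, v T) (sent bool) {
	defer func() {
		if recover() != nil {
			sent = false // channel was closed while we were sending
		}
	}()
	select {
	case ch <- v:
		return true
	default:
		return false // receiver not ready; drop rather than block
	}
}
```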
// Package pluginworker implements the worker-side runtime for plugin job
// handlers: it maintains the admin stream, dispatches detection and
// execution requests, and reports heartbeats.
package pluginworker

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

const (
	defaultHeartbeatInterval = 15 * time.Second
	defaultReconnectDelay    = 5 * time.Second
	defaultSendBufferSize    = 256
)

// DetectionSender sends detection responses for one request.
type DetectionSender interface {
	SendProposals(*plugin_pb.DetectionProposals) error
	SendComplete(*plugin_pb.DetectionComplete) error
	SendActivity(*plugin_pb.ActivityEvent) error
}

// ExecutionSender sends execution progress/completion responses for one request.
type ExecutionSender interface {
	SendProgress(*plugin_pb.JobProgressUpdate) error
	SendCompleted(*plugin_pb.JobCompleted) error
}

// JobHandler implements one plugin job type on the worker side.
type JobHandler interface {
	Capability() *plugin_pb.JobTypeCapability
	Descriptor() *plugin_pb.JobTypeDescriptor
	Detect(context.Context, *plugin_pb.RunDetectionRequest, DetectionSender) error
	Execute(context.Context, *plugin_pb.ExecuteJobRequest, ExecutionSender) error
}
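
// The sketch below is illustrative only and not part of the original file:
// a minimal no-op JobHandler under the hypothetical job type "example_noop".
// It shows the expected call pattern: stream results through the sender and
// return an error only for failures the runtime should turn into a failure
// completion.
type exampleNoopHandler struct{}

func (h *exampleNoopHandler) Capability() *plugin_pb.JobTypeCapability {
	return &plugin_pb.JobTypeCapability{JobType: "example_noop"}
}

func (h *exampleNoopHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return &plugin_pb.JobTypeDescriptor{JobType: "example_noop"}
}

func (h *exampleNoopHandler) Detect(ctx context.Context, req *plugin_pb.RunDetectionRequest, sender DetectionSender) error {
	// Nothing to propose; report a successful, empty detection run.
	return sender.SendComplete(&plugin_pb.DetectionComplete{Success: true})
}

func (h *exampleNoopHandler) Execute(ctx context.Context, req *plugin_pb.ExecuteJobRequest, sender ExecutionSender) error {
	// Immediately report success; a real handler would do work and stream
	// progress via sender.SendProgress before completing.
	return sender.SendCompleted(&plugin_pb.JobCompleted{Success: true})
}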

// WorkerOptions configures one plugin worker process.
type WorkerOptions struct {
	AdminServer             string
	WorkerID                string
	WorkerVersion           string
	WorkerAddress           string
	HeartbeatInterval       time.Duration
	ReconnectDelay          time.Duration
	MaxDetectionConcurrency int
	MaxExecutionConcurrency int
	GrpcDialOption          grpc.DialOption

	// Handlers lists the job handlers this worker serves. Handler optionally
	// names a single primary handler; NewWorker merges it with Handlers and
	// requires at least one of the two to be set.
	Handlers []JobHandler
	Handler  JobHandler
}

// Worker runs plugin job handlers over the plugin.proto stream.
type Worker struct {
	opts WorkerOptions

	// detectSlots and execSlots are semaphores bounding concurrent
	// detection and execution work.
	detectSlots chan struct{}
	execSlots   chan struct{}

	handlers map[string]JobHandler

	runningMu   sync.RWMutex
	runningWork map[string]*plugin_pb.RunningWork

	workCancelMu sync.Mutex
	workCancel   map[string]context.CancelFunc

	workerID string

	connectionMu sync.RWMutex
	connected    bool
}

// NewWorker creates a plugin worker instance.
func NewWorker(options WorkerOptions) (*Worker, error) {
	if strings.TrimSpace(options.AdminServer) == "" {
		return nil, fmt.Errorf("admin server is required")
	}
	if options.GrpcDialOption == nil {
		return nil, fmt.Errorf("grpc dial option is required")
	}
	if options.HeartbeatInterval <= 0 {
		options.HeartbeatInterval = defaultHeartbeatInterval
	}
	if options.ReconnectDelay <= 0 {
		options.ReconnectDelay = defaultReconnectDelay
	}
	if options.MaxDetectionConcurrency <= 0 {
		options.MaxDetectionConcurrency = 1
	}
	if options.MaxExecutionConcurrency <= 0 {
		options.MaxExecutionConcurrency = 1
	}
	if strings.TrimSpace(options.WorkerVersion) == "" {
		options.WorkerVersion = "dev"
	}

	workerID := strings.TrimSpace(options.WorkerID)
	if workerID == "" {
		workerID = generateWorkerID()
	}

	workerAddress := strings.TrimSpace(options.WorkerAddress)
	if workerAddress == "" {
		hostname, _ := os.Hostname()
		workerAddress = hostname
	}
	opts := options
	opts.WorkerAddress = workerAddress

	allHandlers := make([]JobHandler, 0, len(opts.Handlers)+1)
	if opts.Handler != nil {
		allHandlers = append(allHandlers, opts.Handler)
	}
	allHandlers = append(allHandlers, opts.Handlers...)
	if len(allHandlers) == 0 {
		return nil, fmt.Errorf("at least one job handler is required")
	}

	handlers := make(map[string]JobHandler, len(allHandlers))
	for i, handler := range allHandlers {
		if handler == nil {
			return nil, fmt.Errorf("job handler at index %d is nil", i)
		}
		handlerJobType, err := resolveHandlerJobType(handler)
		if err != nil {
			return nil, fmt.Errorf("resolve job handler at index %d: %w", i, err)
		}
		key := normalizeJobTypeKey(handlerJobType)
		if key == "" {
			return nil, fmt.Errorf("job handler at index %d has empty job type", i)
		}
		if _, found := handlers[key]; found {
			return nil, fmt.Errorf("duplicate job handler for job type %q", handlerJobType)
		}
		handlers[key] = handler
	}
	if opts.Handler == nil {
		opts.Handler = allHandlers[0]
	}

	w := &Worker{
		opts:        opts,
		detectSlots: make(chan struct{}, opts.MaxDetectionConcurrency),
		execSlots:   make(chan struct{}, opts.MaxExecutionConcurrency),
		handlers:    handlers,
		runningWork: make(map[string]*plugin_pb.RunningWork),
		workCancel:  make(map[string]context.CancelFunc),
		workerID:    workerID,
	}
	return w, nil
}
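
// A minimal usage sketch (hypothetical values; a real deployment would load
// the dial option from its TLS configuration rather than use insecure
// credentials):
//
//	worker, err := NewWorker(WorkerOptions{
//		AdminServer:    "localhost:23646", // hypothetical admin address
//		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
//		Handlers:       []JobHandler{&exampleNoopHandler{}},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = worker.Run(ctx) // blocks, reconnecting until ctx is canceled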

// Run keeps the plugin worker connected and reconnects on stream failures.
func (w *Worker) Run(ctx context.Context) error {
	adminAddress := pb.ServerToGrpcAddress(w.opts.AdminServer)

	for {
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		if err := w.runOnce(ctx, adminAddress); err != nil {
			if ctx.Err() != nil {
				return nil
			}
			glog.Warningf("Plugin worker %s stream ended: %v", w.workerID, err)
		}

		select {
		case <-ctx.Done():
			return nil
		case <-time.After(w.opts.ReconnectDelay):
		}
	}
}

func (w *Worker) runOnce(ctx context.Context, adminAddress string) error {
	defer w.setConnected(false)

	dialCtx, cancelDial := context.WithTimeout(ctx, 5*time.Second)
	defer cancelDial()

	conn, err := pb.GrpcDial(dialCtx, adminAddress, false, w.opts.GrpcDialOption)
	if err != nil {
		return fmt.Errorf("dial admin %s: %w", adminAddress, err)
	}
	defer conn.Close()

	client := plugin_pb.NewPluginControlServiceClient(conn)
	connCtx, cancelConn := context.WithCancel(ctx)
	defer cancelConn()

	stream, err := client.WorkerStream(connCtx)
	if err != nil {
		return fmt.Errorf("open worker stream: %w", err)
	}
	w.setConnected(true)

	sendCh := make(chan *plugin_pb.WorkerToAdminMessage, defaultSendBufferSize)
	sendErrCh := make(chan error, 1)

	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		if msg == nil {
			return false
		}
		msg.WorkerId = w.workerID
		if msg.SentAt == nil {
			msg.SentAt = timestamppb.Now()
		}
		select {
		case <-connCtx.Done():
			return false
		case sendCh <- msg:
			return true
		}
	}

	// Sender goroutine: serializes all stream writes; the first send error
	// is surfaced on sendErrCh and tears down the connection context.
	go func() {
		for {
			select {
			case <-connCtx.Done():
				return
			case msg := <-sendCh:
				if msg == nil {
					continue
				}
				if err := stream.Send(msg); err != nil {
					select {
					case sendErrCh <- err:
					default:
					}
					cancelConn()
					return
				}
			}
		}
	}()

	if !send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Hello{Hello: w.buildHello()},
	}) {
		return fmt.Errorf("send worker hello: stream closed")
	}

	heartbeatTicker := time.NewTicker(w.opts.HeartbeatInterval)
	defer heartbeatTicker.Stop()

	// Heartbeat goroutine: periodically reports slot usage and running work.
	go func() {
		for {
			select {
			case <-connCtx.Done():
				return
			case <-heartbeatTicker.C:
				w.sendHeartbeat(send)
			}
		}
	}()

	// Receive loop: surface any pending send error first, then block on Recv.
	for {
		select {
		case <-connCtx.Done():
			return connCtx.Err()
		case err := <-sendErrCh:
			return fmt.Errorf("send to admin stream: %w", err)
		default:
		}

		message, err := stream.Recv()
		if err != nil {
			return fmt.Errorf("recv admin message: %w", err)
		}

		w.handleAdminMessage(connCtx, message, send)
	}
}

// IsConnected reports whether the worker currently has an active stream to admin.
func (w *Worker) IsConnected() bool {
	w.connectionMu.RLock()
	defer w.connectionMu.RUnlock()
	return w.connected
}

func (w *Worker) setConnected(connected bool) {
	w.connectionMu.Lock()
	w.connected = connected
	w.connectionMu.Unlock()
}

// handleAdminMessage dispatches one admin-to-worker message to the matching
// request handler and acknowledges anything it cannot serve.
func (w *Worker) handleAdminMessage(
	ctx context.Context,
	message *plugin_pb.AdminToWorkerMessage,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) {
	if message == nil {
		return
	}

	switch body := message.Body.(type) {
	case *plugin_pb.AdminToWorkerMessage_Hello:
		// Admin hello requires no response.
		_ = body
	case *plugin_pb.AdminToWorkerMessage_RequestConfigSchema:
		w.handleSchemaRequest(message.GetRequestId(), body.RequestConfigSchema, send)
	case *plugin_pb.AdminToWorkerMessage_RunDetectionRequest:
		w.handleDetectionRequest(ctx, message.GetRequestId(), body.RunDetectionRequest, send)
	case *plugin_pb.AdminToWorkerMessage_ExecuteJobRequest:
		w.handleExecuteRequest(ctx, message.GetRequestId(), body.ExecuteJobRequest, send)
	case *plugin_pb.AdminToWorkerMessage_CancelRequest:
		cancel := body.CancelRequest
		targetID := ""
		if cancel != nil {
			targetID = strings.TrimSpace(cancel.TargetId)
		}
		accepted := false
		ackMessage := "cancel target is required"
		if targetID != "" {
			if w.cancelWork(targetID) {
				accepted = true
				ackMessage = "cancel request accepted"
			} else {
				ackMessage = "cancel target not found"
			}
		}
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
				RequestId: message.GetRequestId(),
				Accepted:  accepted,
				Message:   ackMessage,
			}},
		})
	case *plugin_pb.AdminToWorkerMessage_Shutdown:
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
				RequestId: message.GetRequestId(),
				Accepted:  true,
				Message:   "shutdown acknowledged",
			}},
		})
	default:
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
				RequestId: message.GetRequestId(),
				Accepted:  false,
				Message:   "unsupported request body",
			}},
		})
	}
}

// handleSchemaRequest answers a config schema request with the handler's job
// type descriptor, or with an error response when no handler matches.
func (w *Worker) handleSchemaRequest(requestID string, request *plugin_pb.RequestConfigSchema, send func(*plugin_pb.WorkerToAdminMessage) bool) {
	jobType := ""
	if request != nil {
		jobType = strings.TrimSpace(request.JobType)
	}

	handler, resolvedJobType, err := w.findHandler(jobType)
	if err != nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_ConfigSchemaResponse{ConfigSchemaResponse: &plugin_pb.ConfigSchemaResponse{
				RequestId:    requestID,
				JobType:      jobType,
				Success:      false,
				ErrorMessage: err.Error(),
			}},
		})
		return
	}

	descriptor := handler.Descriptor()
	if descriptor == nil || descriptor.JobType == "" {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_ConfigSchemaResponse{ConfigSchemaResponse: &plugin_pb.ConfigSchemaResponse{
				RequestId:    requestID,
				JobType:      resolvedJobType,
				Success:      false,
				ErrorMessage: "handler descriptor is not configured",
			}},
		})
		return
	}

	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_ConfigSchemaResponse{ConfigSchemaResponse: &plugin_pb.ConfigSchemaResponse{
			RequestId:         requestID,
			JobType:           descriptor.JobType,
			Success:           true,
			JobTypeDescriptor: descriptor,
		}},
	})
}

// handleDetectionRequest acknowledges a detection request immediately and
// queues it until a detection slot frees up; detection is queued rather than
// rejected at capacity.
func (w *Worker) handleDetectionRequest(
	ctx context.Context,
	requestID string,
	request *plugin_pb.RunDetectionRequest,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) {
	if request == nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_DetectionComplete{DetectionComplete: &plugin_pb.DetectionComplete{
				RequestId:    requestID,
				Success:      false,
				ErrorMessage: "run detection request is nil",
			}},
		})
		return
	}

	handler, resolvedJobType, err := w.findHandler(request.JobType)
	if err != nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_DetectionComplete{DetectionComplete: &plugin_pb.DetectionComplete{
				RequestId:    requestID,
				JobType:      request.JobType,
				Success:      false,
				ErrorMessage: err.Error(),
			}},
		})
		return
	}

	workKey := "detect:" + requestID
	w.setRunningWork(workKey, &plugin_pb.RunningWork{
		WorkId:          requestID,
		Kind:            plugin_pb.WorkKind_WORK_KIND_DETECTION,
		JobType:         resolvedJobType,
		State:           plugin_pb.JobState_JOB_STATE_ASSIGNED,
		ProgressPercent: 0,
		Stage:           "queued",
	})
	w.sendHeartbeat(send)

	requestCtx, cancelRequest := context.WithCancel(ctx)
	w.setWorkCancel(cancelRequest, requestID)

	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
			RequestId: requestID,
			Accepted:  true,
			Message:   "detection request accepted",
		}},
	})

	go func() {
		detectionSender := &detectionSender{
			requestID: requestID,
			jobType:   resolvedJobType,
			send:      send,
		}
		defer func() {
			w.clearWorkCancel(requestID)
			cancelRequest()
			w.clearRunningWork(workKey)
			w.sendHeartbeat(send)
		}()

		// Wait for a detection slot; a canceled request completes with an error.
		select {
		case <-requestCtx.Done():
			detectionSender.SendComplete(&plugin_pb.DetectionComplete{
				Success:      false,
				ErrorMessage: requestCtx.Err().Error(),
			})
			return
		case w.detectSlots <- struct{}{}:
		}
		defer func() {
			<-w.detectSlots
			w.sendHeartbeat(send)
		}()

		w.setRunningWork(workKey, &plugin_pb.RunningWork{
			WorkId:          requestID,
			Kind:            plugin_pb.WorkKind_WORK_KIND_DETECTION,
			JobType:         resolvedJobType,
			State:           plugin_pb.JobState_JOB_STATE_RUNNING,
			ProgressPercent: 0,
			Stage:           "detecting",
		})
		w.sendHeartbeat(send)

		if err := handler.Detect(requestCtx, request, detectionSender); err != nil {
			detectionSender.SendComplete(&plugin_pb.DetectionComplete{
				Success:      false,
				ErrorMessage: err.Error(),
			})
		}
	}()
}

// handleExecuteRequest starts job execution if an execution slot is free;
// unlike detection, execution is rejected up front when the executor is at
// capacity.
func (w *Worker) handleExecuteRequest(
	ctx context.Context,
	requestID string,
	request *plugin_pb.ExecuteJobRequest,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) {
	if request == nil || request.Job == nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: &plugin_pb.JobCompleted{
				RequestId:    requestID,
				Success:      false,
				ErrorMessage: "execute request/job is nil",
			}},
		})
		return
	}

	handler, resolvedJobType, err := w.findHandler(request.Job.JobType)
	if err != nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: &plugin_pb.JobCompleted{
				RequestId:    requestID,
				JobId:        request.Job.JobId,
				JobType:      request.Job.JobType,
				Success:      false,
				ErrorMessage: err.Error(),
			}},
		})
		return
	}

	select {
	case w.execSlots <- struct{}{}:
	default:
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: &plugin_pb.JobCompleted{
				RequestId:    requestID,
				JobId:        request.Job.JobId,
				JobType:      resolvedJobType,
				Success:      false,
				ErrorMessage: "executor is at capacity",
			}},
		})
		return
	}
	w.sendHeartbeat(send)

	workKey := "exec:" + requestID
	w.setRunningWork(workKey, &plugin_pb.RunningWork{
		WorkId:          request.Job.JobId,
		Kind:            plugin_pb.WorkKind_WORK_KIND_EXECUTION,
		JobType:         resolvedJobType,
		State:           plugin_pb.JobState_JOB_STATE_RUNNING,
		ProgressPercent: 0,
		Stage:           "starting",
	})
	w.sendHeartbeat(send)

	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
			RequestId: requestID,
			Accepted:  true,
			Message:   "execute request accepted",
		}},
	})

	go func() {
		requestCtx, cancelRequest := context.WithCancel(ctx)
		w.setWorkCancel(cancelRequest, requestID, request.Job.JobId)
		defer func() {
			w.clearWorkCancel(requestID, request.Job.JobId)
			cancelRequest()
			<-w.execSlots
			w.clearRunningWork(workKey)
			w.sendHeartbeat(send)
		}()

		executionSender := &executionSender{
			requestID: requestID,
			jobID:     request.Job.JobId,
			jobType:   resolvedJobType,
			send:      send,
			onProgress: func(progress float64, stage string) {
				w.updateRunningExecution(workKey, progress, stage)
			},
		}
		if err := handler.Execute(requestCtx, request, executionSender); err != nil {
			executionSender.SendCompleted(&plugin_pb.JobCompleted{
				Success:      false,
				ErrorMessage: err.Error(),
			})
		}
	}()
}

// buildHello advertises the worker's identity and one capability per handled
// job type, stamping the configured concurrency limits onto each capability.
func (w *Worker) buildHello() *plugin_pb.WorkerHello {
	jobTypeKeys := make([]string, 0, len(w.handlers))
	for key := range w.handlers {
		jobTypeKeys = append(jobTypeKeys, key)
	}
	sort.Strings(jobTypeKeys)

	capabilities := make([]*plugin_pb.JobTypeCapability, 0, len(jobTypeKeys))
	jobTypes := make([]string, 0, len(jobTypeKeys))

	for _, key := range jobTypeKeys {
		handler := w.handlers[key]
		if handler == nil {
			continue
		}
		jobType, _ := resolveHandlerJobType(handler)
		capability := handler.Capability()
		if capability == nil {
			capability = &plugin_pb.JobTypeCapability{}
		} else {
			capability = proto.Clone(capability).(*plugin_pb.JobTypeCapability)
		}
		if strings.TrimSpace(capability.JobType) == "" {
			capability.JobType = jobType
		}
		capability.MaxDetectionConcurrency = int32(cap(w.detectSlots))
		capability.MaxExecutionConcurrency = int32(cap(w.execSlots))
		capabilities = append(capabilities, capability)
		if capability.JobType != "" {
			jobTypes = append(jobTypes, capability.JobType)
		}
	}

	instanceID := generateWorkerID()
	return &plugin_pb.WorkerHello{
		WorkerId:         w.workerID,
		WorkerInstanceId: "inst-" + instanceID,
		Address:          w.opts.WorkerAddress,
		WorkerVersion:    w.opts.WorkerVersion,
		ProtocolVersion:  "plugin.v1",
		Capabilities:     capabilities,
		Metadata: map[string]string{
			"runtime":   "plugin",
			"job_types": strings.Join(jobTypes, ","),
		},
	}
}

// buildHeartbeat snapshots running work and slot usage for the admin.
func (w *Worker) buildHeartbeat() *plugin_pb.WorkerHeartbeat {
	w.runningMu.RLock()
	running := make([]*plugin_pb.RunningWork, 0, len(w.runningWork))
	for _, work := range w.runningWork {
		if work == nil {
			continue
		}
		// Clone instead of copying the struct by value: generated protobuf
		// messages must not be shallow-copied.
		running = append(running, proto.Clone(work).(*plugin_pb.RunningWork))
	}
	w.runningMu.RUnlock()

	detectUsed := len(w.detectSlots)
	execUsed := len(w.execSlots)
	return &plugin_pb.WorkerHeartbeat{
		WorkerId:            w.workerID,
		RunningWork:         running,
		DetectionSlotsUsed:  int32(detectUsed),
		DetectionSlotsTotal: int32(cap(w.detectSlots)),
		ExecutionSlotsUsed:  int32(execUsed),
		ExecutionSlotsTotal: int32(cap(w.execSlots)),
		QueuedJobsByType:    map[string]int32{},
		Metadata: map[string]string{
			"runtime": "plugin",
		},
	}
}

// sendHeartbeat pushes a heartbeat through the stream send function.
func (w *Worker) sendHeartbeat(send func(*plugin_pb.WorkerToAdminMessage) bool) {
	if send == nil {
		return
	}
	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Heartbeat{
			Heartbeat: w.buildHeartbeat(),
		},
	})
}

// setRunningWork records or replaces the tracked state for one work key.
func (w *Worker) setRunningWork(key string, work *plugin_pb.RunningWork) {
	if strings.TrimSpace(key) == "" || work == nil {
		return
	}
	w.runningMu.Lock()
	w.runningWork[key] = work
	w.runningMu.Unlock()
}

// clearRunningWork drops the tracked state for one work key.
func (w *Worker) clearRunningWork(key string) {
	w.runningMu.Lock()
	delete(w.runningWork, key)
	w.runningMu.Unlock()
}

// updateRunningExecution updates progress and stage for a running execution.
func (w *Worker) updateRunningExecution(key string, progress float64, stage string) {
	w.runningMu.Lock()
	if running := w.runningWork[key]; running != nil {
		running.ProgressPercent = progress
		if strings.TrimSpace(stage) != "" {
			running.Stage = stage
		}
		running.State = plugin_pb.JobState_JOB_STATE_RUNNING
	}
	w.runningMu.Unlock()
}

// detectionSender implements DetectionSender for one detection request,
// filling in request id and job type defaults before forwarding.
type detectionSender struct {
	requestID string
	jobType   string
	send      func(*plugin_pb.WorkerToAdminMessage) bool
}

func (s *detectionSender) SendProposals(proposals *plugin_pb.DetectionProposals) error {
	if proposals == nil {
		return fmt.Errorf("detection proposals are nil")
	}
	if proposals.RequestId == "" {
		proposals.RequestId = s.requestID
	}
	if proposals.JobType == "" {
		proposals.JobType = s.jobType
	}
	if !s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_DetectionProposals{DetectionProposals: proposals},
	}) {
		return fmt.Errorf("stream closed")
	}
	return nil
}

func (s *detectionSender) SendComplete(complete *plugin_pb.DetectionComplete) error {
	if complete == nil {
		return fmt.Errorf("detection complete is nil")
	}
	if complete.RequestId == "" {
		complete.RequestId = s.requestID
	}
	if complete.JobType == "" {
		complete.JobType = s.jobType
	}
	if !s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_DetectionComplete{DetectionComplete: complete},
	}) {
		return fmt.Errorf("stream closed")
	}
	return nil
}

func (s *detectionSender) SendActivity(activity *plugin_pb.ActivityEvent) error {
	if activity == nil {
		return fmt.Errorf("detection activity is nil")
	}
	if activity.CreatedAt == nil {
		activity.CreatedAt = timestamppb.Now()
	}
	if activity.Source == plugin_pb.ActivitySource_ACTIVITY_SOURCE_UNSPECIFIED {
		activity.Source = plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR
	}

	// Activities ride on a progress update so the admin monitor records them.
	update := &plugin_pb.JobProgressUpdate{
		RequestId:       s.requestID,
		JobType:         s.jobType,
		State:           plugin_pb.JobState_JOB_STATE_RUNNING,
		ProgressPercent: 0,
		Stage:           activity.Stage,
		Message:         activity.Message,
		Activities:      []*plugin_pb.ActivityEvent{activity},
		UpdatedAt:       timestamppb.Now(),
	}

	if !s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_JobProgressUpdate{JobProgressUpdate: update},
	}) {
		return fmt.Errorf("stream closed")
	}
	return nil
}

// executionSender implements ExecutionSender for one execute request, filling
// in request/job identifiers and timestamps before forwarding.
type executionSender struct {
	requestID  string
	jobID      string
	jobType    string
	send       func(*plugin_pb.WorkerToAdminMessage) bool
	onProgress func(progress float64, stage string)
}

func (s *executionSender) SendProgress(progress *plugin_pb.JobProgressUpdate) error {
	if progress == nil {
		return fmt.Errorf("job progress is nil")
	}
	if progress.RequestId == "" {
		progress.RequestId = s.requestID
	}
	if progress.JobId == "" {
		progress.JobId = s.jobID
	}
	if progress.JobType == "" {
		progress.JobType = s.jobType
	}
	if progress.UpdatedAt == nil {
		progress.UpdatedAt = timestamppb.Now()
	}
	if s.onProgress != nil {
		s.onProgress(progress.ProgressPercent, progress.Stage)
	}
	if !s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_JobProgressUpdate{JobProgressUpdate: progress},
	}) {
		return fmt.Errorf("stream closed")
	}
	return nil
}

func (s *executionSender) SendCompleted(completed *plugin_pb.JobCompleted) error {
	if completed == nil {
		return fmt.Errorf("job completed is nil")
	}
	if completed.RequestId == "" {
		completed.RequestId = s.requestID
	}
	if completed.JobId == "" {
		completed.JobId = s.jobID
	}
	if completed.JobType == "" {
		completed.JobType = s.jobType
	}
	if completed.CompletedAt == nil {
		completed.CompletedAt = timestamppb.Now()
	}
	if !s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: completed},
	}) {
		return fmt.Errorf("stream closed")
	}
	return nil
}

// generateWorkerID returns a short random id with a "plugin-" prefix,
// falling back to a timestamp-based id if the random source fails.
func generateWorkerID() string {
	random := make([]byte, 3)
	if _, err := rand.Read(random); err != nil {
		return fmt.Sprintf("plugin-%d", time.Now().UnixNano())
	}
	return "plugin-" + hex.EncodeToString(random)
}

// setWorkCancel registers cancel under each non-empty key so admin cancel
// requests can target either the request id or the job id.
func (w *Worker) setWorkCancel(cancel context.CancelFunc, keys ...string) {
	if cancel == nil {
		return
	}
	w.workCancelMu.Lock()
	defer w.workCancelMu.Unlock()
	for _, key := range keys {
		key = strings.TrimSpace(key)
		if key == "" {
			continue
		}
		w.workCancel[key] = cancel
	}
}

// clearWorkCancel removes the cancel registrations for the given keys.
func (w *Worker) clearWorkCancel(keys ...string) {
	w.workCancelMu.Lock()
	defer w.workCancelMu.Unlock()
	for _, key := range keys {
		key = strings.TrimSpace(key)
		if key == "" {
			continue
		}
		delete(w.workCancel, key)
	}
}

// cancelWork cancels in-flight work registered under targetID and reports
// whether a cancelable target was found.
func (w *Worker) cancelWork(targetID string) bool {
	targetID = strings.TrimSpace(targetID)
	if targetID == "" {
		return false
	}

	w.workCancelMu.Lock()
	cancel := w.workCancel[targetID]
	w.workCancelMu.Unlock()
	if cancel == nil {
		return false
	}
	cancel()
	return true
}

// findHandler resolves the handler for jobType. An empty job type is
// accepted only when the worker serves exactly one handler.
func (w *Worker) findHandler(jobType string) (JobHandler, string, error) {
	trimmed := strings.TrimSpace(jobType)
	if trimmed == "" {
		if len(w.handlers) == 1 {
			for _, handler := range w.handlers {
				resolvedJobType, err := resolveHandlerJobType(handler)
				return handler, resolvedJobType, err
			}
		}
		return nil, "", fmt.Errorf("job type is required when worker serves multiple job types")
	}

	key := normalizeJobTypeKey(trimmed)
	handler := w.handlers[key]
	if handler == nil {
		return nil, "", fmt.Errorf("job type %q is not handled by this worker", trimmed)
	}
	resolvedJobType, err := resolveHandlerJobType(handler)
	if err != nil {
		return nil, "", err
	}
	return handler, resolvedJobType, nil
}

// resolveHandlerJobType derives a handler's job type from its descriptor,
// falling back to its capability.
func resolveHandlerJobType(handler JobHandler) (string, error) {
	if handler == nil {
		return "", fmt.Errorf("job handler is nil")
	}

	if descriptor := handler.Descriptor(); descriptor != nil {
		if jobType := strings.TrimSpace(descriptor.JobType); jobType != "" {
			return jobType, nil
		}
	}
	if capability := handler.Capability(); capability != nil {
		if jobType := strings.TrimSpace(capability.JobType); jobType != "" {
			return jobType, nil
		}
	}
	return "", fmt.Errorf("handler job type is not configured")
}

// normalizeJobTypeKey lower-cases and trims a job type for map lookups.
func normalizeJobTypeKey(jobType string) string {
	return strings.ToLower(strings.TrimSpace(jobType))
}