Refactor plugin system and migrate worker runtime (#8369)

* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fall back to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of rejecting them at capacity

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric, distinguishing them by color only

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
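
For the debounced flusher above, a minimal sketch of the idea; the type and method names are illustrative, not the actual implementation:

    package plugin

    import (
        "sync"
        "time"
    )

    // debouncedFlusher coalesces bursts of updates into a single flush call
    // after a quiet period, so frequent job updates don't hammer the disk.
    type debouncedFlusher struct {
        mu    sync.Mutex
        timer *time.Timer
        delay time.Duration
        flush func() // e.g. persist the current job detail snapshot
    }

    // MarkDirty restarts the quiet-period timer; only the last call in a
    // burst actually triggers flush.
    func (f *debouncedFlusher) MarkDirty() {
        f.mu.Lock()
        defer f.mu.Unlock()
        if f.timer != nil {
            f.timer.Stop()
        }
        f.timer = time.AfterFunc(f.delay, f.flush)
    }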

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.
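
For the PRNG swap, a minimal sketch of seeded bulk-payload generation; makePayload and the fixed seed are illustrative, not the runner's actual code:

    package main

    import (
        "fmt"
        "math/rand"
    )

    // makePayload fills a buffer from a deterministic PRNG; stress payloads
    // only need to be cheap and reproducible, not cryptographically random.
    func makePayload(seed int64, size int) []byte {
        rng := rand.New(rand.NewSource(seed))
        buf := make([]byte, size)
        rng.Read(buf) // (*rand.Rand).Read always fills buf and returns a nil error
        return buf
    }

    func main() {
        fmt.Println(len(makePayload(42, 1<<20))) // 1 MiB of repeatable filler
    }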

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
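
A rough sketch of the safe-send and shutdown pattern described above; safeSendCh is named in the commit, but its exact signature and the surrounding struct fields are assumptions:

    package plugin

    import "sync"

    // safeSendCh sends v on ch but converts a send-on-closed-channel panic
    // into a false return, so late background goroutines cannot crash the
    // process during shutdown.
    func safeSendCh[T any](ch chan T, v T) (ok bool) {
        defer func() {
            if recover() != nil {
                ok = false
            }
        }()
        ch <- v
        return true
    }

    // Shutdown-side sketch: background goroutines are tracked in a WaitGroup
    // and Shutdown waits for them before returning.
    type pluginCore struct{ wg sync.WaitGroup }

    func (p *pluginCore) Shutdown() { p.wg.Wait() }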

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.
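
The pointer-based timestamp handling looks roughly like the sketch below; timeToPtr is referenced in a later test commit, while the struct fields and JSON tags here are illustrative:

    package plugin

    import "time"

    // timeToPtr returns a pointer to t so optional timestamps can be nil and
    // dropped from JSON output via `omitempty`.
    func timeToPtr(t time.Time) *time.Time { return &t }

    // TrackedJob keeps optional timestamps as pointers; nil means "not set".
    type TrackedJob struct {
        CreatedAt   *time.Time `json:"created_at,omitempty"`
        CompletedAt *time.Time `json:"completed_at,omitempty"`
    }

    // markCompleted is nil-safe: a zero time keeps the field nil instead of
    // storing a meaningless zero value.
    func (j *TrackedJob) markCompleted(t time.Time) {
        if t.IsZero() {
            return
        }
        j.CompletedAt = timeToPtr(t)
    }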

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
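
The shutdown detection change amounts to selecting on r.shutdownCh instead of matching error text; a minimal sketch, with the scheduler shape and helper name assumed:

    package plugin

    import "time"

    type scheduler struct {
        shutdownCh chan struct{}
    }

    // waitOrShutdown sleeps for d but wakes immediately when shutdownCh is
    // closed; callers branch on the bool instead of inspecting error strings.
    func (r *scheduler) waitOrShutdown(d time.Duration) (shuttingDown bool) {
        t := time.NewTimer(d)
        defer t.Stop()
        select {
        case <-r.shutdownCh:
            return true
        case <-t.C:
            return false
        }
    }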

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
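
An atomic write in this style usually means writing to a temp file in the same directory and renaming it into place, after creating the parent first; a sketch under those assumptions (permission values are placeholders):

    package plugin

    import (
        "os"
        "path/filepath"
    )

    // atomicWriteFile writes data to a temp file next to path and renames it
    // into place, so readers never observe a partially written file. The
    // parent directory is created first so first-time writes do not fail.
    func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
        if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
            return err
        }
        tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
        if err != nil {
            return err
        }
        defer os.Remove(tmp.Name()) // fails harmlessly once the rename succeeds
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Close(); err != nil {
            return err
        }
        if err := os.Chmod(tmp.Name(), perm); err != nil {
            return err
        }
        return os.Rename(tmp.Name(), path)
    }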

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Rebuilt plugin_monitor_test.go to robustly handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
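
The race fix for safeSendDetectionComplete follows the usual copy-the-channel-under-the-lock pattern; a sketch with assumed field names:

    package plugin

    import "sync"

    type detectionRun struct {
        mu         sync.Mutex
        completeCh chan struct{}
    }

    // safeSendDetectionComplete grabs the channel reference under the lock so
    // a concurrent reset cannot swap or nil it between the check and the send,
    // then signals outside the critical section to avoid holding the lock
    // across a potentially blocking operation.
    func (d *detectionRun) safeSendDetectionComplete() {
        d.mu.Lock()
        ch := d.completeCh
        d.mu.Unlock()
        if ch == nil {
            return
        }
        select {
        case ch <- struct{}{}:
        default: // receiver already gone; drop the signal
        }
    }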

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
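
Limiting the body size is the standard http.MaxBytesReader pattern; a sketch matching the 1MB cap, with the helper name assumed (the actual change lives in parseProtoJSONBody):

    package handlers

    import (
        "io"
        "net/http"
    )

    const maxPluginBodyBytes = 1 << 20 // 1 MiB

    // readLimitedBody rejects oversized payloads instead of buffering them,
    // so a single request cannot consume unbounded memory.
    func readLimitedBody(w http.ResponseWriter, r *http.Request) ([]byte, error) {
        r.Body = http.MaxBytesReader(w, r.Body, maxPluginBodyBytes)
        return io.ReadAll(r.Body)
    }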

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID

* admin/plugin: fix racy Shutdown channel close with sync.Once
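
Guarding the close with sync.Once is the usual fix for a racy double close; roughly, with the struct fields assumed:

    package plugin

    import "sync"

    type Plugin struct {
        shutdownCh   chan struct{}
        shutdownOnce sync.Once
    }

    // Shutdown is safe to call from multiple goroutines; only the first call
    // closes the channel, so there is no double-close panic.
    func (p *Plugin) Shutdown() {
        p.shutdownOnce.Do(func() {
            close(p.shutdownCh)
        })
    }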

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Author: Chris Lu
Date: 2026-02-18 13:42:41 -08:00
Committed by: GitHub
Parent: 5463038760
Commit: 8ec9ff4a12
82 changed files with 23419 additions and 11389 deletions


@@ -0,0 +1,465 @@
package plugin
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
const defaultWorkerStaleTimeout = 2 * time.Minute
// WorkerSession contains tracked worker metadata and plugin status.
type WorkerSession struct {
WorkerID string
WorkerInstance string
Address string
WorkerVersion string
ProtocolVersion string
ConnectedAt time.Time
LastSeenAt time.Time
Capabilities map[string]*plugin_pb.JobTypeCapability
Heartbeat *plugin_pb.WorkerHeartbeat
}
// Registry tracks connected plugin workers and capability-based selection.
type Registry struct {
mu sync.RWMutex
sessions map[string]*WorkerSession
staleAfter time.Duration
detectorCursor map[string]int
executorCursor map[string]int
}
// NewRegistry returns an empty worker registry using the default stale timeout.
func NewRegistry() *Registry {
return &Registry{
sessions: make(map[string]*WorkerSession),
staleAfter: defaultWorkerStaleTimeout,
detectorCursor: make(map[string]int),
executorCursor: make(map[string]int),
}
}
// UpsertFromHello registers or refreshes a worker session from its hello
// message and returns a copy of the stored session.
func (r *Registry) UpsertFromHello(hello *plugin_pb.WorkerHello) *WorkerSession {
now := time.Now()
caps := make(map[string]*plugin_pb.JobTypeCapability, len(hello.Capabilities))
for _, c := range hello.Capabilities {
if c == nil || c.JobType == "" {
continue
}
caps[c.JobType] = cloneJobTypeCapability(c)
}
r.mu.Lock()
defer r.mu.Unlock()
session, ok := r.sessions[hello.WorkerId]
if !ok {
session = &WorkerSession{
WorkerID: hello.WorkerId,
ConnectedAt: now,
}
r.sessions[hello.WorkerId] = session
}
session.WorkerInstance = hello.WorkerInstanceId
session.Address = hello.Address
session.WorkerVersion = hello.WorkerVersion
session.ProtocolVersion = hello.ProtocolVersion
session.LastSeenAt = now
session.Capabilities = caps
return cloneWorkerSession(session)
}
// Remove drops a worker session, typically when its stream disconnects.
func (r *Registry) Remove(workerID string) {
r.mu.Lock()
defer r.mu.Unlock()
delete(r.sessions, workerID)
}
// UpdateHeartbeat stores the latest heartbeat for a known worker and refreshes its last-seen time.
func (r *Registry) UpdateHeartbeat(workerID string, heartbeat *plugin_pb.WorkerHeartbeat) {
r.mu.Lock()
defer r.mu.Unlock()
session, ok := r.sessions[workerID]
if !ok {
return
}
session.Heartbeat = cloneWorkerHeartbeat(heartbeat)
session.LastSeenAt = time.Now()
}
// Get returns a copy of the worker session, or false when it is unknown or stale.
func (r *Registry) Get(workerID string) (*WorkerSession, bool) {
r.mu.RLock()
defer r.mu.RUnlock()
session, ok := r.sessions[workerID]
if !ok || r.isSessionStaleLocked(session, time.Now()) {
return nil, false
}
return cloneWorkerSession(session), true
}
// List returns copies of all non-stale worker sessions sorted by worker ID.
func (r *Registry) List() []*WorkerSession {
r.mu.RLock()
defer r.mu.RUnlock()
out := make([]*WorkerSession, 0, len(r.sessions))
now := time.Now()
for _, s := range r.sessions {
if r.isSessionStaleLocked(s, now) {
continue
}
out = append(out, cloneWorkerSession(s))
}
sort.Slice(out, func(i, j int) bool {
return out[i].WorkerID < out[j].WorkerID
})
return out
}
// DetectableJobTypes returns sorted job types that currently have at least one detect-capable worker.
func (r *Registry) DetectableJobTypes() []string {
r.mu.RLock()
defer r.mu.RUnlock()
jobTypes := make(map[string]struct{})
now := time.Now()
for _, session := range r.sessions {
if r.isSessionStaleLocked(session, now) {
continue
}
for jobType, capability := range session.Capabilities {
if capability == nil || !capability.CanDetect {
continue
}
jobTypes[jobType] = struct{}{}
}
}
out := make([]string, 0, len(jobTypes))
for jobType := range jobTypes {
out = append(out, jobType)
}
sort.Strings(out)
return out
}
// JobTypes returns sorted job types known by connected workers regardless of capability kind.
func (r *Registry) JobTypes() []string {
r.mu.RLock()
defer r.mu.RUnlock()
jobTypes := make(map[string]struct{})
now := time.Now()
for _, session := range r.sessions {
if r.isSessionStaleLocked(session, now) {
continue
}
for jobType := range session.Capabilities {
if jobType == "" {
continue
}
jobTypes[jobType] = struct{}{}
}
}
out := make([]string, 0, len(jobTypes))
for jobType := range jobTypes {
out = append(out, jobType)
}
sort.Strings(out)
return out
}
// PickSchemaProvider picks one worker for schema requests.
// Preference order:
// 1) workers that can detect this job type
// 2) workers that can execute this job type
// tie-break: more free slots, then lexical worker ID.
func (r *Registry) PickSchemaProvider(jobType string) (*WorkerSession, error) {
r.mu.RLock()
defer r.mu.RUnlock()
var candidates []*WorkerSession
now := time.Now()
for _, s := range r.sessions {
if r.isSessionStaleLocked(s, now) {
continue
}
capability := s.Capabilities[jobType]
if capability == nil {
continue
}
if capability.CanDetect || capability.CanExecute {
candidates = append(candidates, s)
}
}
if len(candidates) == 0 {
return nil, fmt.Errorf("no worker available for schema job_type=%s", jobType)
}
sort.Slice(candidates, func(i, j int) bool {
a := candidates[i]
b := candidates[j]
ac := a.Capabilities[jobType]
bc := b.Capabilities[jobType]
// Prefer detect-capable providers first.
if ac.CanDetect != bc.CanDetect {
return ac.CanDetect
}
aSlots := availableDetectionSlots(a, ac) + availableExecutionSlots(a, ac)
bSlots := availableDetectionSlots(b, bc) + availableExecutionSlots(b, bc)
if aSlots != bSlots {
return aSlots > bSlots
}
return a.WorkerID < b.WorkerID
})
return cloneWorkerSession(candidates[0]), nil
}
// PickDetector picks one detector worker for a job type.
func (r *Registry) PickDetector(jobType string) (*WorkerSession, error) {
return r.pickByKind(jobType, true)
}
// PickExecutor picks one executor worker for a job type.
func (r *Registry) PickExecutor(jobType string) (*WorkerSession, error) {
return r.pickByKind(jobType, false)
}
// ListExecutors returns sorted executor candidates for one job type.
// Ordering is by most available execution slots, then lexical worker ID.
// The top tie group is rotated round-robin to prevent sticky assignment.
func (r *Registry) ListExecutors(jobType string) ([]*WorkerSession, error) {
r.mu.Lock()
defer r.mu.Unlock()
candidates := r.collectByKindLocked(jobType, false, time.Now())
if len(candidates) == 0 {
return nil, fmt.Errorf("no executor worker available for job_type=%s", jobType)
}
sortByKind(candidates, jobType, false)
r.rotateTopCandidatesLocked(candidates, jobType, false)
out := make([]*WorkerSession, 0, len(candidates))
for _, candidate := range candidates {
out = append(out, cloneWorkerSession(candidate))
}
return out, nil
}
func (r *Registry) pickByKind(jobType string, detect bool) (*WorkerSession, error) {
r.mu.Lock()
defer r.mu.Unlock()
candidates := r.collectByKindLocked(jobType, detect, time.Now())
if len(candidates) == 0 {
kind := "executor"
if detect {
kind = "detector"
}
return nil, fmt.Errorf("no %s worker available for job_type=%s", kind, jobType)
}
sortByKind(candidates, jobType, detect)
r.rotateTopCandidatesLocked(candidates, jobType, detect)
return cloneWorkerSession(candidates[0]), nil
}
func (r *Registry) collectByKindLocked(jobType string, detect bool, now time.Time) []*WorkerSession {
var candidates []*WorkerSession
for _, session := range r.sessions {
if r.isSessionStaleLocked(session, now) {
continue
}
capability := session.Capabilities[jobType]
if capability == nil {
continue
}
if detect && capability.CanDetect {
candidates = append(candidates, session)
}
if !detect && capability.CanExecute {
candidates = append(candidates, session)
}
}
return candidates
}
func (r *Registry) isSessionStaleLocked(session *WorkerSession, now time.Time) bool {
if session == nil {
return true
}
if r.staleAfter <= 0 {
return false
}
lastSeen := session.LastSeenAt
if lastSeen.IsZero() {
lastSeen = session.ConnectedAt
}
if lastSeen.IsZero() {
return false
}
return now.Sub(lastSeen) > r.staleAfter
}
func sortByKind(candidates []*WorkerSession, jobType string, detect bool) {
sort.Slice(candidates, func(i, j int) bool {
a := candidates[i]
b := candidates[j]
ac := a.Capabilities[jobType]
bc := b.Capabilities[jobType]
aSlots := availableSlotsByKind(a, ac, detect)
bSlots := availableSlotsByKind(b, bc, detect)
if aSlots != bSlots {
return aSlots > bSlots
}
return a.WorkerID < b.WorkerID
})
}
// rotateTopCandidatesLocked rotates the group of candidates tied on the most
// available slots using a per-job-type round-robin cursor, so equally loaded
// workers take turns being picked first.
func (r *Registry) rotateTopCandidatesLocked(candidates []*WorkerSession, jobType string, detect bool) {
if len(candidates) < 2 {
return
}
capability := candidates[0].Capabilities[jobType]
topSlots := availableSlotsByKind(candidates[0], capability, detect)
tieEnd := 1
for tieEnd < len(candidates) {
nextCapability := candidates[tieEnd].Capabilities[jobType]
if availableSlotsByKind(candidates[tieEnd], nextCapability, detect) != topSlots {
break
}
tieEnd++
}
if tieEnd <= 1 {
return
}
cursorKey := strings.TrimSpace(jobType)
if cursorKey == "" {
cursorKey = "*"
}
var offset int
if detect {
offset = r.detectorCursor[cursorKey] % tieEnd
r.detectorCursor[cursorKey] = (offset + 1) % tieEnd
} else {
offset = r.executorCursor[cursorKey] % tieEnd
r.executorCursor[cursorKey] = (offset + 1) % tieEnd
}
if offset == 0 {
return
}
prefix := append([]*WorkerSession(nil), candidates[:tieEnd]...)
for i := 0; i < tieEnd; i++ {
candidates[i] = prefix[(i+offset)%tieEnd]
}
}
func availableSlotsByKind(
session *WorkerSession,
capability *plugin_pb.JobTypeCapability,
detect bool,
) int {
if detect {
return availableDetectionSlots(session, capability)
}
return availableExecutionSlots(session, capability)
}
func availableDetectionSlots(session *WorkerSession, capability *plugin_pb.JobTypeCapability) int {
if session.Heartbeat != nil && session.Heartbeat.DetectionSlotsTotal > 0 {
free := int(session.Heartbeat.DetectionSlotsTotal - session.Heartbeat.DetectionSlotsUsed)
if free < 0 {
return 0
}
return free
}
if capability.MaxDetectionConcurrency > 0 {
return int(capability.MaxDetectionConcurrency)
}
return 1
}
func availableExecutionSlots(session *WorkerSession, capability *plugin_pb.JobTypeCapability) int {
if session.Heartbeat != nil && session.Heartbeat.ExecutionSlotsTotal > 0 {
free := int(session.Heartbeat.ExecutionSlotsTotal - session.Heartbeat.ExecutionSlotsUsed)
if free < 0 {
return 0
}
return free
}
if capability.MaxExecutionConcurrency > 0 {
return int(capability.MaxExecutionConcurrency)
}
return 1
}
func cloneWorkerSession(in *WorkerSession) *WorkerSession {
if in == nil {
return nil
}
out := *in
out.Capabilities = make(map[string]*plugin_pb.JobTypeCapability, len(in.Capabilities))
for jobType, capability := range in.Capabilities {
out.Capabilities[jobType] = cloneJobTypeCapability(capability)
}
out.Heartbeat = cloneWorkerHeartbeat(in.Heartbeat)
return &out
}
func cloneJobTypeCapability(in *plugin_pb.JobTypeCapability) *plugin_pb.JobTypeCapability {
if in == nil {
return nil
}
out := *in
return &out
}
func cloneWorkerHeartbeat(in *plugin_pb.WorkerHeartbeat) *plugin_pb.WorkerHeartbeat {
if in == nil {
return nil
}
out := *in
if in.RunningWork != nil {
out.RunningWork = make([]*plugin_pb.RunningWork, 0, len(in.RunningWork))
for _, rw := range in.RunningWork {
if rw == nil {
continue
}
clone := *rw
out.RunningWork = append(out.RunningWork, &clone)
}
}
if in.QueuedJobsByType != nil {
out.QueuedJobsByType = make(map[string]int32, len(in.QueuedJobsByType))
for k, v := range in.QueuedJobsByType {
out.QueuedJobsByType[k] = v
}
}
if in.Metadata != nil {
out.Metadata = make(map[string]string, len(in.Metadata))
for k, v := range in.Metadata {
out.Metadata[k] = v
}
}
return &out
}
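
For orientation, a brief usage sketch of the registry above (not part of the file): register a worker from a hand-built hello, then pick an executor for a job type it advertises. The proto field values are illustrative.

    package plugin

    import (
        "fmt"

        "github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
    )

    // ExampleRegistry is an illustrative walk-through, not part of the
    // registry file itself.
    func ExampleRegistry() {
        r := NewRegistry()
        r.UpsertFromHello(&plugin_pb.WorkerHello{
            WorkerId: "worker-1",
            Capabilities: []*plugin_pb.JobTypeCapability{
                {JobType: "erasure_coding", CanDetect: true, CanExecute: true, MaxExecutionConcurrency: 4},
            },
        })
        if session, err := r.PickExecutor("erasure_coding"); err == nil {
            fmt.Println(session.WorkerID)
        }
    }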