seaweedFS/weed/admin/dash/plugin_api.go
Chris Lu 8ec9ff4a12 Refactor plugin system and migrate worker runtime (#8369)
* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fallback to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric and color-only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support
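
  A short illustration of why the pointer refactor helps (struct and field names here are illustrative, not the actual admin types): encoding/json never treats a struct value as "empty", so a plain time.Time field tagged omitempty still serializes its zero value, while a nil *time.Time is omitted.

  ```go
  package main

  import (
  	"encoding/json"
  	"fmt"
  	"time"
  )

  // With a value time.Time, omitempty never fires, so the zero value
  // "0001-01-01T00:00:00Z" leaks into the JSON output.
  type withValue struct {
  	CompletedAt time.Time `json:"completed_at,omitempty"`
  }

  // With *time.Time, a nil pointer is omitted as expected.
  type withPointer struct {
  	CompletedAt *time.Time `json:"completed_at,omitempty"`
  }

  func main() {
  	a, _ := json.Marshal(withValue{})   // {"completed_at":"0001-01-01T00:00:00Z"}
  	b, _ := json.Marshal(withPointer{}) // {}
  	fmt.Println(string(a))
  	fmt.Println(string(b))
  }
  ```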

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.
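
  A minimal sketch of the seeded-PRNG approach for bulk payloads (function name and seed handling are illustrative, not the runner's actual code): math/rand is cheap and reproducible, which suits non-security-sensitive stress data better than crypto/rand.

  ```go
  package main

  import (
  	"fmt"
  	"math/rand"
  )

  // makePayload fills a buffer with pseudo-random bytes from a seeded PRNG.
  // Reusing the same seed reproduces the same payload across runs.
  func makePayload(seed int64, size int) []byte {
  	rng := rand.New(rand.NewSource(seed))
  	buf := make([]byte, size)
  	rng.Read(buf) // (*rand.Rand).Read never returns an error
  	return buf
  }

  func main() {
  	fmt.Printf("%x\n", makePayload(42, 16))
  }
  ```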

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
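
  A sketch of the send-with-recover and WaitGroup shutdown pattern described above (type and helper names are illustrative, not the actual Plugin struct):

  ```go
  package main

  import (
  	"fmt"
  	"sync"
  )

  // safeSend delivers v on ch but converts a "send on closed channel" panic
  // into a false return instead of crashing the goroutine.
  func safeSend[T any](ch chan T, v T) (ok bool) {
  	defer func() {
  		if recover() != nil {
  			ok = false
  		}
  	}()
  	ch <- v
  	return true
  }

  type runtime struct {
  	wg     sync.WaitGroup
  	events chan string
  }

  func (r *runtime) start() {
  	r.wg.Add(1)
  	go func() {
  		defer r.wg.Done()
  		safeSend(r.events, "heartbeat")
  	}()
  }

  // shutdown waits for background goroutines before closing the channel,
  // so late senders are drained rather than panicking.
  func (r *runtime) shutdown() {
  	r.wg.Wait()
  	close(r.events)
  }

  func main() {
  	r := &runtime{events: make(chan string, 1)}
  	r.start()
  	r.shutdown()
  	fmt.Println(<-r.events)
  }
  ```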

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.
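
  A sketch of nil-safe handling for *time.Time fields (helper names are illustrative; the real code has its own helpers such as timeToPtr):

  ```go
  package main

  import (
  	"fmt"
  	"time"
  )

  // timeToPtr returns a pointer to a copy of t, convenient for assigning
  // optional *time.Time fields like CreatedAt or CompletedAt.
  func timeToPtr(t time.Time) *time.Time { return &t }

  // latest picks the later of two optional timestamps without ever
  // dereferencing a nil pointer.
  func latest(a, b *time.Time) *time.Time {
  	switch {
  	case a == nil:
  		return b
  	case b == nil:
  		return a
  	case b.After(*a):
  		return b
  	default:
  		return a
  	}
  }

  func main() {
  	now := timeToPtr(time.Now())
  	fmt.Println(latest(nil, now).Equal(*now)) // true
  	fmt.Println(latest(now, nil) == now)      // true
  }
  ```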

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
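
  A sketch of shutdown detection via a dedicated channel rather than error-string matching (structure is illustrative, not the actual scheduler):

  ```go
  package main

  import (
  	"fmt"
  	"sync"
  	"time"
  )

  type scheduler struct {
  	shutdownCh chan struct{}
  	wg         sync.WaitGroup
  }

  func (s *scheduler) start() {
  	s.wg.Add(1)
  	go s.loop()
  }

  // loop exits promptly when shutdownCh is closed; no error strings are inspected.
  func (s *scheduler) loop() {
  	defer s.wg.Done()
  	ticker := time.NewTicker(50 * time.Millisecond)
  	defer ticker.Stop()
  	for {
  		select {
  		case <-s.shutdownCh:
  			return
  		case <-ticker.C:
  			fmt.Println("run one scheduling pass")
  		}
  	}
  }

  func (s *scheduler) shutdown() {
  	close(s.shutdownCh)
  	s.wg.Wait()
  }

  func main() {
  	s := &scheduler{shutdownCh: make(chan struct{})}
  	s.start()
  	time.Sleep(120 * time.Millisecond)
  	s.shutdown()
  }
  ```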

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
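
  A sketch of the atomic-write pattern (create parent directories, write to a temp file in the target directory, then rename into place); names and permission values are illustrative:

  ```go
  package main

  import (
  	"fmt"
  	"os"
  	"path/filepath"
  )

  // atomicWrite writes data so that readers never observe a partial file:
  // the temp file lives in the same directory, making the final rename atomic.
  func atomicWrite(path string, data []byte, filePerm, dirPerm os.FileMode) error {
  	dir := filepath.Dir(path)
  	if err := os.MkdirAll(dir, dirPerm); err != nil {
  		return err
  	}
  	tmp, err := os.CreateTemp(dir, filepath.Base(path)+".tmp-*")
  	if err != nil {
  		return err
  	}
  	tmpName := tmp.Name()
  	defer os.Remove(tmpName) // no-op after a successful rename
  	if _, err := tmp.Write(data); err != nil {
  		tmp.Close()
  		return err
  	}
  	if err := tmp.Close(); err != nil {
  		return err
  	}
  	if err := os.Chmod(tmpName, filePerm); err != nil {
  		return err
  	}
  	return os.Rename(tmpName, path)
  }

  func main() {
  	err := atomicWrite(filepath.Join(os.TempDir(), "plugin-demo", "config.pb"), []byte("demo"), 0o644, 0o755)
  	fmt.Println("write error:", err)
  }
  ```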

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.
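
  A simplified illustration of the reasoning (not the actual classifyShardID): once dataShards and parityShards are each validated to be positive, their sum is always positive, so a separate totalShards <= 0 branch can never execute.

  ```go
  // Illustrative only. With both counts guarded, totalShards = dataShards +
  // parityShards is necessarily > 0, making any later totalShards <= 0 check dead code.
  func classifyShard(id, dataShards, parityShards int) string {
  	if dataShards <= 0 || parityShards <= 0 {
  		return "unknown"
  	}
  	if id < dataShards {
  		return "data"
  	}
  	if id < dataShards+parityShards {
  		return "parity"
  	}
  	return "out-of-range"
  }
  ```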

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Reworked plugin_monitor_test.go to handle asynchronous persistence robustly.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.
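
  A sketch of the test synchronization pattern: call Shutdown() so debounced writes are flushed before asserting on disk state (the fake monitor and names below are illustrative, not the real test):

  ```go
  package plugin_test

  import (
  	"os"
  	"path/filepath"
  	"testing"
  )

  // fakeMonitor stands in for the real monitor: it buffers writes and flushes
  // them in Shutdown(), mirroring debounced persistence.
  type fakeMonitor struct {
  	dir     string
  	pending map[string][]byte
  }

  func newFakeMonitor(dir string) *fakeMonitor {
  	return &fakeMonitor{dir: dir, pending: map[string][]byte{}}
  }

  func (m *fakeMonitor) Record(id string, data []byte) { m.pending[id] = data }

  func (m *fakeMonitor) Shutdown() error {
  	for id, data := range m.pending {
  		if err := os.WriteFile(filepath.Join(m.dir, id+".json"), data, 0o644); err != nil {
  			return err
  		}
  	}
  	return nil
  }

  func TestShutdownFlushesPendingRecords(t *testing.T) {
  	dir := t.TempDir()
  	m := newFakeMonitor(dir)
  	m.Record("job-1", []byte(`{"state":"completed"}`))

  	// Without Shutdown() the debounced write may not have happened yet;
  	// calling it makes the on-disk assertion deterministic.
  	if err := m.Shutdown(); err != nil {
  		t.Fatalf("shutdown: %v", err)
  	}
  	if _, err := os.Stat(filepath.Join(dir, "job-1.json")); err != nil {
  		t.Fatalf("expected persisted record: %v", err)
  	}
  }
  ```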

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
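
  A sketch of the race-fix pattern mentioned above: snapshot the channel field and completion flag while holding the lock, then send after releasing it, so field access never races and the flag prevents a double send (names are illustrative):

  ```go
  package main

  import (
  	"fmt"
  	"sync"
  )

  type detectionRun struct {
  	mu       sync.Mutex
  	doneCh   chan string
  	finished bool
  }

  // complete reads doneCh and finished under the lock, then performs the send
  // outside the critical section.
  func (r *detectionRun) complete(result string) {
  	r.mu.Lock()
  	ch := r.doneCh
  	already := r.finished
  	r.finished = true
  	r.mu.Unlock()

  	if already || ch == nil {
  		return
  	}
  	ch <- result
  }

  func main() {
  	r := &detectionRun{doneCh: make(chan string, 1)}
  	go r.complete("ok")
  	fmt.Println(<-r.doneCh)
  }
  ```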

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID

* admin/plugin: fix racy Shutdown channel close with sync.Once
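
  A sketch of the sync.Once close pattern (illustrative structure, not the actual worker type): concurrent Shutdown calls close the channel exactly once instead of panicking on a double close.

  ```go
  package main

  import (
  	"fmt"
  	"sync"
  )

  type worker struct {
  	shutdownCh   chan struct{}
  	shutdownOnce sync.Once
  }

  // Shutdown is safe to call from multiple goroutines; sync.Once guarantees
  // the channel is closed exactly once.
  func (w *worker) Shutdown() {
  	w.shutdownOnce.Do(func() { close(w.shutdownCh) })
  }

  func main() {
  	w := &worker{shutdownCh: make(chan struct{})}
  	var wg sync.WaitGroup
  	for i := 0; i < 3; i++ {
  		wg.Add(1)
  		go func() {
  			defer wg.Done()
  			w.Shutdown() // concurrent calls are fine
  		}()
  	}
  	wg.Wait()
  	<-w.shutdownCh // receive on a closed channel returns immediately
  	fmt.Println("shut down once")
  }
  ```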

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2026-02-18 13:42:41 -08:00

736 lines
21 KiB
Go

package dash
import (
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/admin/plugin"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
defaultPluginDetectionTimeout = 45 * time.Second
defaultPluginExecutionTimeout = 90 * time.Second
maxPluginDetectionTimeout = 5 * time.Minute
maxPluginExecutionTimeout = 10 * time.Minute
defaultPluginRunTimeout = 5 * time.Minute
maxPluginRunTimeout = 30 * time.Minute
)
// GetPluginStatusAPI returns plugin status.
func (s *AdminServer) GetPluginStatusAPI(c *gin.Context) {
plugin := s.GetPlugin()
if plugin == nil {
c.JSON(http.StatusOK, gin.H{
"enabled": false,
"worker_grpc_port": s.GetWorkerGrpcPort(),
})
return
}
c.JSON(http.StatusOK, gin.H{
"enabled": true,
"configured": plugin.IsConfigured(),
"base_dir": plugin.BaseDir(),
"worker_count": len(plugin.ListWorkers()),
"worker_grpc_port": s.GetWorkerGrpcPort(),
})
}
// GetPluginWorkersAPI returns currently connected plugin workers.
func (s *AdminServer) GetPluginWorkersAPI(c *gin.Context) {
workers := s.GetPluginWorkers()
if workers == nil {
c.JSON(http.StatusOK, []interface{}{})
return
}
c.JSON(http.StatusOK, workers)
}
// GetPluginJobTypesAPI returns known plugin job types from workers and persisted data.
func (s *AdminServer) GetPluginJobTypesAPI(c *gin.Context) {
jobTypes, err := s.ListPluginJobTypes()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if jobTypes == nil {
c.JSON(http.StatusOK, []interface{}{})
return
}
c.JSON(http.StatusOK, jobTypes)
}
// GetPluginJobsAPI returns tracked jobs for monitoring.
func (s *AdminServer) GetPluginJobsAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Query("job_type"))
state := strings.TrimSpace(c.Query("state"))
limit := parsePositiveInt(c.Query("limit"), 200)
jobs := s.ListPluginJobs(jobType, state, limit)
if jobs == nil {
c.JSON(http.StatusOK, []interface{}{})
return
}
c.JSON(http.StatusOK, jobs)
}
// GetPluginJobAPI returns one tracked job.
func (s *AdminServer) GetPluginJobAPI(c *gin.Context) {
jobID := strings.TrimSpace(c.Param("jobId"))
if jobID == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobId is required"})
return
}
job, found := s.GetPluginJob(jobID)
if !found {
c.JSON(http.StatusNotFound, gin.H{"error": "job not found"})
return
}
c.JSON(http.StatusOK, job)
}
// GetPluginJobDetailAPI returns detailed information for one tracked plugin job.
func (s *AdminServer) GetPluginJobDetailAPI(c *gin.Context) {
jobID := strings.TrimSpace(c.Param("jobId"))
if jobID == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobId is required"})
return
}
activityLimit := parsePositiveInt(c.Query("activity_limit"), 500)
relatedLimit := parsePositiveInt(c.Query("related_limit"), 20)
detail, found, err := s.GetPluginJobDetail(jobID, activityLimit, relatedLimit)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if !found || detail == nil {
c.JSON(http.StatusNotFound, gin.H{"error": "job detail not found"})
return
}
c.JSON(http.StatusOK, detail)
}
// GetPluginActivitiesAPI returns recent plugin activities.
func (s *AdminServer) GetPluginActivitiesAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Query("job_type"))
limit := parsePositiveInt(c.Query("limit"), 500)
activities := s.ListPluginActivities(jobType, limit)
if activities == nil {
c.JSON(http.StatusOK, []interface{}{})
return
}
c.JSON(http.StatusOK, activities)
}
// GetPluginSchedulerStatesAPI returns per-job-type scheduler status for monitoring.
func (s *AdminServer) GetPluginSchedulerStatesAPI(c *gin.Context) {
jobTypeFilter := strings.TrimSpace(c.Query("job_type"))
states, err := s.ListPluginSchedulerStates()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if jobTypeFilter != "" {
filtered := make([]interface{}, 0, len(states))
for _, state := range states {
if state.JobType == jobTypeFilter {
filtered = append(filtered, state)
}
}
c.JSON(http.StatusOK, filtered)
return
}
if states == nil {
c.JSON(http.StatusOK, []interface{}{})
return
}
c.JSON(http.StatusOK, states)
}
// RequestPluginJobTypeSchemaAPI asks a worker for one job type schema.
func (s *AdminServer) RequestPluginJobTypeSchemaAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
forceRefresh := c.DefaultQuery("force_refresh", "false") == "true"
ctx, cancel := context.WithTimeout(c.Request.Context(), defaultPluginDetectionTimeout)
defer cancel()
descriptor, err := s.RequestPluginJobTypeDescriptor(ctx, jobType, forceRefresh)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
renderProtoJSON(c, http.StatusOK, descriptor)
}
// GetPluginJobTypeDescriptorAPI returns persisted descriptor for a job type.
func (s *AdminServer) GetPluginJobTypeDescriptorAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
descriptor, err := s.LoadPluginJobTypeDescriptor(jobType)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if descriptor == nil {
c.JSON(http.StatusNotFound, gin.H{"error": "descriptor not found"})
return
}
renderProtoJSON(c, http.StatusOK, descriptor)
}
// GetPluginJobTypeConfigAPI loads persisted config for a job type.
func (s *AdminServer) GetPluginJobTypeConfigAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
config, err := s.LoadPluginJobTypeConfig(jobType)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if config == nil {
config = &plugin_pb.PersistedJobTypeConfig{
JobType: jobType,
AdminConfigValues: map[string]*plugin_pb.ConfigValue{},
WorkerConfigValues: map[string]*plugin_pb.ConfigValue{},
AdminRuntime: &plugin_pb.AdminRuntimeConfig{},
}
}
renderProtoJSON(c, http.StatusOK, config)
}
// UpdatePluginJobTypeConfigAPI stores persisted config for a job type.
func (s *AdminServer) UpdatePluginJobTypeConfigAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
config := &plugin_pb.PersistedJobTypeConfig{}
if err := parseProtoJSONBody(c, config); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
config.JobType = jobType
if config.UpdatedAt == nil {
config.UpdatedAt = timestamppb.Now()
}
if config.AdminRuntime == nil {
config.AdminRuntime = &plugin_pb.AdminRuntimeConfig{}
}
if config.AdminConfigValues == nil {
config.AdminConfigValues = map[string]*plugin_pb.ConfigValue{}
}
if config.WorkerConfigValues == nil {
config.WorkerConfigValues = map[string]*plugin_pb.ConfigValue{}
}
username := c.GetString("username")
if username == "" {
username = "admin"
}
config.UpdatedBy = username
if err := s.SavePluginJobTypeConfig(config); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
renderProtoJSON(c, http.StatusOK, config)
}
// GetPluginRunHistoryAPI returns bounded run history for a job type.
func (s *AdminServer) GetPluginRunHistoryAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
history, err := s.GetPluginRunHistory(jobType)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if history == nil {
c.JSON(http.StatusOK, gin.H{
"job_type": jobType,
"successful_runs": []interface{}{},
"error_runs": []interface{}{},
"last_updated_time": nil,
})
return
}
c.JSON(http.StatusOK, history)
}
// TriggerPluginDetectionAPI runs one detector for this job type and returns proposals.
func (s *AdminServer) TriggerPluginDetectionAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
var req struct {
ClusterContext json.RawMessage `json:"cluster_context"`
MaxResults int32 `json:"max_results"`
TimeoutSeconds int `json:"timeout_seconds"`
}
if err := c.ShouldBindJSON(&req); err != nil && err != io.EOF {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body: " + err.Error()})
return
}
clusterContext, err := s.parseOrBuildClusterContext(req.ClusterContext)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
timeout := normalizeTimeout(req.TimeoutSeconds, defaultPluginDetectionTimeout, maxPluginDetectionTimeout)
ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
defer cancel()
report, err := s.RunPluginDetectionWithReport(ctx, jobType, clusterContext, req.MaxResults)
proposals := make([]*plugin_pb.JobProposal, 0)
requestID := ""
detectorWorkerID := ""
totalProposals := int32(0)
if report != nil {
proposals = report.Proposals
requestID = report.RequestID
detectorWorkerID = report.WorkerID
if report.Complete != nil {
totalProposals = report.Complete.TotalProposals
}
}
proposalPayloads := make([]map[string]interface{}, 0, len(proposals))
for _, proposal := range proposals {
payload, marshalErr := protoMessageToMap(proposal)
if marshalErr != nil {
glog.Warningf("failed to marshal proposal for jobType=%s: %v", jobType, marshalErr)
continue
}
proposalPayloads = append(proposalPayloads, payload)
}
sort.Slice(proposalPayloads, func(i, j int) bool {
iPriorityStr, _ := proposalPayloads[i]["priority"].(string)
jPriorityStr, _ := proposalPayloads[j]["priority"].(string)
iPriority := plugin_pb.JobPriority_value[iPriorityStr]
jPriority := plugin_pb.JobPriority_value[jPriorityStr]
if iPriority != jPriority {
return iPriority > jPriority
}
iID, _ := proposalPayloads[i]["proposal_id"].(string)
jID, _ := proposalPayloads[j]["proposal_id"].(string)
return iID < jID
})
activities := s.ListPluginActivities(jobType, 500)
filteredActivities := make([]interface{}, 0, len(activities))
if requestID != "" {
for i := len(activities) - 1; i >= 0; i-- {
activity := activities[i]
if activity.RequestID != requestID {
continue
}
filteredActivities = append(filteredActivities, activity)
}
}
response := gin.H{
"job_type": jobType,
"request_id": requestID,
"detector_worker_id": detectorWorkerID,
"total_proposals": totalProposals,
"count": len(proposalPayloads),
"proposals": proposalPayloads,
"activities": filteredActivities,
}
if err != nil {
response["error"] = err.Error()
c.JSON(http.StatusInternalServerError, response)
return
}
c.JSON(http.StatusOK, response)
}
// RunPluginJobTypeAPI runs full workflow for one job type: detect then dispatch detected jobs.
func (s *AdminServer) RunPluginJobTypeAPI(c *gin.Context) {
jobType := strings.TrimSpace(c.Param("jobType"))
if jobType == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
return
}
var req struct {
ClusterContext json.RawMessage `json:"cluster_context"`
MaxResults int32 `json:"max_results"`
TimeoutSeconds int `json:"timeout_seconds"`
Attempt int32 `json:"attempt"`
}
if err := c.ShouldBindJSON(&req); err != nil && err != io.EOF {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body: " + err.Error()})
return
}
if req.Attempt < 1 {
req.Attempt = 1
}
clusterContext, err := s.parseOrBuildClusterContext(req.ClusterContext)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
timeout := normalizeTimeout(req.TimeoutSeconds, defaultPluginRunTimeout, maxPluginRunTimeout)
ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
defer cancel()
proposals, err := s.RunPluginDetection(ctx, jobType, clusterContext, req.MaxResults)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
detectedCount := len(proposals)
filteredProposals, skippedActiveCount, err := s.FilterPluginProposalsWithActiveJobs(jobType, proposals)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
type executionResult struct {
JobID string `json:"job_id"`
Success bool `json:"success"`
Error string `json:"error,omitempty"`
Completion map[string]interface{} `json:"completion,omitempty"`
}
results := make([]executionResult, 0, len(filteredProposals))
successCount := 0
errorCount := 0
for index, proposal := range filteredProposals {
job := buildJobSpecFromProposal(jobType, proposal, index)
completed, execErr := s.ExecutePluginJob(ctx, job, clusterContext, req.Attempt)
result := executionResult{
JobID: job.JobId,
Success: execErr == nil,
}
if completed != nil {
if payload, marshalErr := protoMessageToMap(completed); marshalErr == nil {
result.Completion = payload
}
}
if execErr != nil {
result.Error = execErr.Error()
errorCount++
} else {
successCount++
}
results = append(results, result)
}
c.JSON(http.StatusOK, gin.H{
"job_type": jobType,
"detected_count": detectedCount,
"ready_to_execute_count": len(filteredProposals),
"skipped_active_count": skippedActiveCount,
"executed_count": len(results),
"success_count": successCount,
"error_count": errorCount,
"execution_results": results,
})
}
// ExecutePluginJobAPI executes one job on a capable worker and waits for completion.
func (s *AdminServer) ExecutePluginJobAPI(c *gin.Context) {
var req struct {
Job json.RawMessage `json:"job"`
ClusterContext json.RawMessage `json:"cluster_context"`
Attempt int32 `json:"attempt"`
TimeoutSeconds int `json:"timeout_seconds"`
}
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body: " + err.Error()})
return
}
if len(req.Job) == 0 {
c.JSON(http.StatusBadRequest, gin.H{"error": "job is required"})
return
}
job := &plugin_pb.JobSpec{}
if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(req.Job, job); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job payload: " + err.Error()})
return
}
clusterContext, err := s.parseOrBuildClusterContext(req.ClusterContext)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
if req.Attempt < 1 {
req.Attempt = 1
}
timeout := normalizeTimeout(req.TimeoutSeconds, defaultPluginExecutionTimeout, maxPluginExecutionTimeout)
ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
defer cancel()
completed, err := s.ExecutePluginJob(ctx, job, clusterContext, req.Attempt)
if err != nil {
if completed != nil {
payload, marshalErr := protoMessageToMap(completed)
if marshalErr == nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error(), "completion": payload})
return
}
}
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
renderProtoJSON(c, http.StatusOK, completed)
}
func (s *AdminServer) parseOrBuildClusterContext(raw json.RawMessage) (*plugin_pb.ClusterContext, error) {
if len(raw) == 0 {
return s.buildDefaultPluginClusterContext(), nil
}
contextMessage := &plugin_pb.ClusterContext{}
if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(raw, contextMessage); err != nil {
return nil, fmt.Errorf("invalid cluster_context payload: %w", err)
}
fallback := s.buildDefaultPluginClusterContext()
if len(contextMessage.MasterGrpcAddresses) == 0 {
contextMessage.MasterGrpcAddresses = append(contextMessage.MasterGrpcAddresses, fallback.MasterGrpcAddresses...)
}
if len(contextMessage.FilerGrpcAddresses) == 0 {
contextMessage.FilerGrpcAddresses = append(contextMessage.FilerGrpcAddresses, fallback.FilerGrpcAddresses...)
}
if len(contextMessage.VolumeGrpcAddresses) == 0 {
contextMessage.VolumeGrpcAddresses = append(contextMessage.VolumeGrpcAddresses, fallback.VolumeGrpcAddresses...)
}
if contextMessage.Metadata == nil {
contextMessage.Metadata = map[string]string{}
}
contextMessage.Metadata["source"] = "admin"
return contextMessage, nil
}
func (s *AdminServer) buildDefaultPluginClusterContext() *plugin_pb.ClusterContext {
clusterContext := &plugin_pb.ClusterContext{
MasterGrpcAddresses: make([]string, 0),
FilerGrpcAddresses: make([]string, 0),
VolumeGrpcAddresses: make([]string, 0),
Metadata: map[string]string{
"source": "admin",
},
}
masterAddress := string(s.masterClient.GetMaster(context.Background()))
if masterAddress != "" {
clusterContext.MasterGrpcAddresses = append(clusterContext.MasterGrpcAddresses, masterAddress)
}
filerSeen := map[string]struct{}{}
for _, filer := range s.GetAllFilers() {
filer = strings.TrimSpace(filer)
if filer == "" {
continue
}
if _, exists := filerSeen[filer]; exists {
continue
}
filerSeen[filer] = struct{}{}
clusterContext.FilerGrpcAddresses = append(clusterContext.FilerGrpcAddresses, filer)
}
volumeSeen := map[string]struct{}{}
if volumeServers, err := s.GetClusterVolumeServers(); err == nil {
for _, server := range volumeServers.VolumeServers {
address := strings.TrimSpace(server.GetDisplayAddress())
if address == "" {
address = strings.TrimSpace(server.Address)
}
if address == "" {
continue
}
if _, exists := volumeSeen[address]; exists {
continue
}
volumeSeen[address] = struct{}{}
clusterContext.VolumeGrpcAddresses = append(clusterContext.VolumeGrpcAddresses, address)
}
} else {
glog.V(1).Infof("failed to build default plugin volume context: %v", err)
}
sort.Strings(clusterContext.MasterGrpcAddresses)
sort.Strings(clusterContext.FilerGrpcAddresses)
sort.Strings(clusterContext.VolumeGrpcAddresses)
return clusterContext
}
const parseProtoJSONBodyMaxBytes = 1 << 20 // 1 MB
func parseProtoJSONBody(c *gin.Context, message proto.Message) error {
limitedBody := http.MaxBytesReader(c.Writer, c.Request.Body, parseProtoJSONBodyMaxBytes)
data, err := io.ReadAll(limitedBody)
if err != nil {
return fmt.Errorf("failed to read request body: %w", err)
}
if len(data) == 0 {
return fmt.Errorf("request body is empty")
}
if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(data, message); err != nil {
return fmt.Errorf("invalid protobuf json: %w", err)
}
return nil
}
func renderProtoJSON(c *gin.Context, statusCode int, message proto.Message) {
payload, err := protojson.MarshalOptions{
UseProtoNames: true,
EmitUnpopulated: true,
}.Marshal(message)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to encode response: " + err.Error()})
return
}
c.Data(statusCode, "application/json", payload)
}
func protoMessageToMap(message proto.Message) (map[string]interface{}, error) {
payload, err := protojson.MarshalOptions{UseProtoNames: true}.Marshal(message)
if err != nil {
return nil, err
}
out := map[string]interface{}{}
if err := json.Unmarshal(payload, &out); err != nil {
return nil, err
}
return out, nil
}
func normalizeTimeout(timeoutSeconds int, defaultTimeout, maxTimeout time.Duration) time.Duration {
if timeoutSeconds <= 0 {
return defaultTimeout
}
timeout := time.Duration(timeoutSeconds) * time.Second
if timeout > maxTimeout {
return maxTimeout
}
return timeout
}
func buildJobSpecFromProposal(jobType string, proposal *plugin_pb.JobProposal, index int) *plugin_pb.JobSpec {
now := timestamppb.Now()
suffix := make([]byte, 4)
if _, err := rand.Read(suffix); err != nil {
// Fallback to simpler ID if rand fails
suffix = []byte(fmt.Sprintf("%d", index))
}
jobID := fmt.Sprintf("%s-%d-%s", jobType, now.AsTime().UnixNano(), hex.EncodeToString(suffix))
jobSpec := &plugin_pb.JobSpec{
JobId: jobID,
JobType: jobType,
Priority: plugin_pb.JobPriority_JOB_PRIORITY_NORMAL,
CreatedAt: now,
Labels: make(map[string]string),
Parameters: make(map[string]*plugin_pb.ConfigValue),
DedupeKey: "",
}
if proposal != nil {
jobSpec.Summary = proposal.Summary
jobSpec.Detail = proposal.Detail
if proposal.Priority != plugin_pb.JobPriority_JOB_PRIORITY_UNSPECIFIED {
jobSpec.Priority = proposal.Priority
}
jobSpec.DedupeKey = proposal.DedupeKey
jobSpec.Parameters = plugin.CloneConfigValueMap(proposal.Parameters)
if proposal.Labels != nil {
for k, v := range proposal.Labels {
jobSpec.Labels[k] = v
}
}
}
return jobSpec
}
func parsePositiveInt(raw string, defaultValue int) int {
value, err := strconv.Atoi(strings.TrimSpace(raw))
if err != nil || value <= 0 {
return defaultValue
}
return value
}
// cloneConfigValueMap is now exported by the plugin package as CloneConfigValueMap