Refactor plugin system and migrate worker runtime (#8369)

* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fall back to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of rejecting at capacity

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric, distinguishing them by color only

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption
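A minimal net/http sketch of the buffered-rendering idea (the helper name is hypothetical): render into memory first and write to the client only after rendering fully succeeds, so a template error can no longer corrupt a half-written response.

import (
	"bytes"
	"io"
	"net/http"
)

// renderBuffered renders into a buffer first; nothing reaches the client
// until rendering succeeds, so an error can still produce a clean 500.
func renderBuffered(w http.ResponseWriter, render func(io.Writer) error) {
	var buf bytes.Buffer
	if err := render(&buf); err != nil {
		http.Error(w, "render failed", http.StatusInternalServerError)
		return
	}
	_, _ = buf.WriteTo(w)
}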

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
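A rough sketch of a debounced flusher in the spirit of this change (all names assumed, not the actual implementation): coalesce bursts of state updates into a single disk write after a quiet period.

import (
	"sync"
	"time"
)

// debouncedFlusher schedules at most one pending flush at a time;
// repeated markDirty calls within the window share that flush.
type debouncedFlusher struct {
	mu    sync.Mutex
	timer *time.Timer
	flush func()
}

func (f *debouncedFlusher) markDirty() {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.timer != nil {
		return // a flush is already scheduled
	}
	f.timer = time.AfterFunc(500*time.Millisecond, func() {
		f.mu.Lock()
		f.timer = nil
		f.mu.Unlock()
		f.flush()
	})
}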

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve
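The bounded wait can be sketched as a semaphore acquire with a deadline (names assumed, not the actual scheduler code):

import (
	"context"
	"time"
)

// reserve takes one executor slot or gives up after timeout/cancellation,
// so the scheduler never blocks forever waiting for capacity.
func reserve(ctx context.Context, slots chan struct{}, timeout time.Duration) bool {
	t := time.NewTimer(timeout)
	defer t.Stop()
	select {
	case slots <- struct{}{}: // acquired; release later with <-slots
		return true
	case <-t.C:
		return false
	case <-ctx.Done():
		return false
	}
}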

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support
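For reference, the reason behind this refactor: encoding/json's omitempty never omits a struct value such as time.Time, but it does omit a nil pointer. A minimal illustration with hypothetical field names:

import "time"

type jobTimes struct {
	// time.Time is a struct, so omitempty keeps it; the zero value
	// still marshals as "0001-01-01T00:00:00Z".
	CreatedAt time.Time `json:"created_at,omitempty"`
	// a nil *time.Time is omitted, which is the point of the refactor
	CompletedAt *time.Time `json:"completed_at,omitempty"`
}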

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.
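The PRNG swap, sketched (size and seed are illustrative): math/rand is much cheaper than crypto/rand and produces reproducible payloads across runs, which is what a stress test wants.

import "math/rand"

// makePayload returns size bytes of deterministic pseudo-random data.
// Fine for test payloads with no security requirement.
func makePayload(size int, seed int64) []byte {
	rng := rand.New(rand.NewSource(seed))
	payload := make([]byte, size)
	rng.Read(payload) // (*rand.Rand).Read always fills payload, never errors
	return payload
}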

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
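A plausible shape for the safeSendCh helper, shown as a non-blocking sketch (not the actual code): recover() turns the "send on closed channel" panic into a boolean result.

// safeSendCh survives a concurrently closed channel: the send panic is
// recovered and reported as sent=false instead of crashing the goroutine.
func safeSendCh[T any](ch chan<- T, v T) (sent bool) {
	defer func() {
		if recover() != nil {
			sent = false
		}
	}()
	select {
	case ch <- v:
		return true
	default:
		return false // receiver not ready; don't block shutdown
	}
}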

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
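The channel-based shutdown check that replaces string matching, sketched with an assumed field name:

// shuttingDown never blocks: a closed channel is always ready to receive,
// so this is a cheap, race-free alternative to matching error strings.
func shuttingDown(shutdownCh <-chan struct{}) bool {
	select {
	case <-shutdownCh:
		return true
	default:
		return false
	}
}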

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
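Sketches of both fixes under assumed names: a recursive copy for JSON-like values, and a temp-file-plus-rename write that also creates missing parent directories.

import (
	"os"
	"path/filepath"
)

// deepCopyGenericValue recursively copies JSON-like values so cloned
// jobs never share mutable maps or slices with the original.
func deepCopyGenericValue(v any) any {
	switch val := v.(type) {
	case map[string]any:
		out := make(map[string]any, len(val))
		for k, item := range val {
			out[k] = deepCopyGenericValue(item)
		}
		return out
	case []any:
		out := make([]any, len(val))
		for i, item := range val {
			out[i] = deepCopyGenericValue(item)
		}
		return out
	default:
		return val // scalars are copied by value
	}
}

// atomicWriteFile writes to a temp file in the target directory and
// renames it into place; rename is atomic on POSIX filesystems.
func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil { // the missing-parent fix
		return err
	}
	tmp, err := os.CreateTemp(dir, ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the rename has succeeded
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}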

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Reworked plugin_monitor_test.go to robustly handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.
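The timeToPtr test helper referenced above presumably looks like this (signature assumed):

import "time"

// timeToPtr adapts time.Time literals to the *time.Time fields the
// refactored structs expect; zero times become nil so omitempty applies.
func timeToPtr(t time.Time) *time.Time {
	if t.IsZero() {
		return nil
	}
	return &t
}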

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
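The safeSendDetectionComplete fix follows the standard lock-then-send pattern, sketched here with assumed names: copy the channel reference under the mutex, then send without holding the lock.

import "sync"

type detection struct {
	mu     sync.Mutex
	doneCh chan struct{}
}

// safeSendDetectionComplete reads doneCh under the lock so a concurrent
// writer can't swap or nil the field mid-send, then sends lock-free.
func (d *detection) safeSendDetectionComplete() {
	d.mu.Lock()
	ch := d.doneCh
	d.mu.Unlock()
	if ch != nil {
		ch <- struct{}{}
	}
}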

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
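The cap maps directly onto http.MaxBytesReader; a sketch with the surrounding reader assumed:

import (
	"io"
	"net/http"
)

// readBoundedBody caps the request body at 1 MiB; reads past the limit
// fail instead of streaming unbounded data into memory.
func readBoundedBody(w http.ResponseWriter, r *http.Request) ([]byte, error) {
	r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
	return io.ReadAll(r.Body)
}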

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID

* admin/plugin: fix racy Shutdown channel close with sync.Once
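The sync.Once fix in sketch form (field names assumed): concurrent or repeated Shutdown calls close the channel exactly once, avoiding the "close of closed channel" panic.

import "sync"

type plugin struct {
	shutdownCh   chan struct{}
	shutdownOnce sync.Once
}

func (p *plugin) Shutdown() {
	p.shutdownOnce.Do(func() { close(p.shutdownCh) })
}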

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity: .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Author: Chris Lu
Date: 2026-02-18 13:42:41 -08:00
Committed by: GitHub
Parent: 5463038760
Commit: 8ec9ff4a12
82 changed files with 23419 additions and 11389 deletions


@@ -117,6 +117,12 @@ var cmdAdmin = &Command{
- TLS is automatically used if certificates are configured
- Workers fall back to insecure connections if TLS is unavailable
Plugin:
- Always enabled on the worker gRPC port
- Registers plugin.proto gRPC service on the same worker gRPC port
- External workers connect with: weed worker -admin=<admin_host:admin_port>
- Persists plugin metadata under dataDir/plugin when dataDir is configured
Configuration File:
- The security.toml file is read from ".", "$HOME/.seaweedfs/",
"/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order
@@ -197,6 +203,7 @@ func runAdmin(cmd *Command, args []string) bool {
} else {
fmt.Printf("Authentication: Disabled\n")
}
fmt.Printf("Plugin: Enabled\n")
// Set up graceful shutdown
ctx, cancel := context.WithCancel(context.Background())
@@ -295,7 +302,7 @@ func startAdminServer(ctx context.Context, options AdminOptions, enableUI bool,
r.StaticFS("/static", http.FS(staticFS))
}
// Create admin server
// Create admin server (plugin is always enabled)
adminServer := dash.NewAdminServer(*options.master, nil, dataDir, icebergPort)
// Show discovered filers


@@ -0,0 +1,48 @@
package command
import (
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// parseCapabilities converts comma-separated legacy maintenance capabilities to task types.
// This remains for mini-mode maintenance worker wiring.
func parseCapabilities(capabilityStr string) []types.TaskType {
if capabilityStr == "" {
return nil
}
capabilityMap := map[string]types.TaskType{}
typesRegistry := tasks.GetGlobalTypesRegistry()
for taskType := range typesRegistry.GetAllDetectors() {
capabilityMap[strings.ToLower(string(taskType))] = taskType
}
if taskType, exists := capabilityMap["erasure_coding"]; exists {
capabilityMap["ec"] = taskType
}
if taskType, exists := capabilityMap["remote_upload"]; exists {
capabilityMap["remote"] = taskType
}
if taskType, exists := capabilityMap["fix_replication"]; exists {
capabilityMap["replication"] = taskType
}
var capabilities []types.TaskType
parts := strings.Split(capabilityStr, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if taskType, exists := capabilityMap[part]; exists {
capabilities = append(capabilities, taskType)
} else {
glog.Warningf("Unknown capability: %s", part)
}
}
return capabilities
}


@@ -13,11 +13,13 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
pluginworker "github.com/seaweedfs/seaweedfs/weed/plugin/worker"
"github.com/seaweedfs/seaweedfs/weed/security"
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/seaweedfs/seaweedfs/weed/worker"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
@@ -43,6 +45,7 @@ const (
defaultMiniVolumeSizeMB = 128 // Default volume size for mini mode
maxVolumeSizeMB = 1024 // Maximum volume size in MB (1GB)
GrpcPortOffset = 10000 // Offset used to calculate gRPC port from HTTP port
defaultMiniPluginJobTypes = "vacuum,volume_balance,erasure_coding"
)
var (
@@ -1028,6 +1031,7 @@ func startMiniAdminWithWorker(allServicesReady chan struct{}) {
// Start worker after admin server is ready
startMiniWorker()
startMiniPluginWorker(ctx)
// Wait for worker to be ready by polling its gRPC port
workerGrpcAddr := fmt.Sprintf("%s:%d", bindIp, *miniAdminOptions.grpcPort)
@@ -1165,6 +1169,62 @@ func startMiniWorker() {
glog.Infof("Maintenance worker %s started successfully", workerInstance.ID())
}
func startMiniPluginWorker(ctx context.Context) {
glog.Infof("Starting plugin worker for admin server")
adminAddr := fmt.Sprintf("%s:%d", *miniIp, *miniAdminOptions.port)
resolvedAdminAddr := resolvePluginWorkerAdminServer(adminAddr)
if resolvedAdminAddr != adminAddr {
glog.Infof("Resolved mini plugin worker admin endpoint: %s -> %s", adminAddr, resolvedAdminAddr)
}
workerDir := filepath.Join(*miniDataFolders, "plugin_worker")
if err := os.MkdirAll(workerDir, 0755); err != nil {
glog.Fatalf("Failed to create plugin worker directory: %v", err)
}
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
handlers, err := buildPluginWorkerHandlers(defaultMiniPluginJobTypes, grpcDialOption)
if err != nil {
glog.Fatalf("Failed to build mini plugin worker handlers: %v", err)
}
workerID, err := resolvePluginWorkerID("", workerDir)
if err != nil {
glog.Fatalf("Failed to resolve mini plugin worker ID: %v", err)
}
pluginRuntime, err := pluginworker.NewWorker(pluginworker.WorkerOptions{
AdminServer: resolvedAdminAddr,
WorkerID: workerID,
WorkerVersion: version.Version(),
WorkerAddress: *miniIp,
HeartbeatInterval: 15 * time.Second,
ReconnectDelay: 5 * time.Second,
MaxDetectionConcurrency: 1,
MaxExecutionConcurrency: 2,
GrpcDialOption: grpcDialOption,
Handlers: handlers,
})
if err != nil {
glog.Fatalf("Failed to create mini plugin worker: %v", err)
}
go func() {
runCtx := ctx
if runCtx == nil {
runCtx = context.Background()
}
if runErr := pluginRuntime.Run(runCtx); runErr != nil && runCtx.Err() == nil {
glog.Errorf("Mini plugin worker stopped with error: %v", runErr)
}
}()
glog.Infof("Plugin worker %s started successfully with job types: %s", workerID, defaultMiniPluginJobTypes)
}
const credentialsInstructionTemplate = `
To create S3 credentials, you have two options:


@@ -0,0 +1,13 @@
package command
import "testing"
func TestMiniDefaultPluginJobTypes(t *testing.T) {
jobTypes, err := parsePluginWorkerJobTypes(defaultMiniPluginJobTypes)
if err != nil {
t.Fatalf("parsePluginWorkerJobTypes(mini default) err = %v", err)
}
if len(jobTypes) != 3 {
t.Fatalf("expected mini default job types to include 3 handlers, got %v", jobTypes)
}
}


@@ -0,0 +1,238 @@
package command
import (
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
func TestBuildPluginWorkerHandler(t *testing.T) {
dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
handler, err := buildPluginWorkerHandler("vacuum", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandler(vacuum) err = %v", err)
}
if handler == nil {
t.Fatalf("expected non-nil handler")
}
handler, err = buildPluginWorkerHandler("", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandler(default) err = %v", err)
}
if handler == nil {
t.Fatalf("expected non-nil default handler")
}
handler, err = buildPluginWorkerHandler("volume_balance", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandler(volume_balance) err = %v", err)
}
if handler == nil {
t.Fatalf("expected non-nil volume_balance handler")
}
handler, err = buildPluginWorkerHandler("balance", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandler(balance alias) err = %v", err)
}
if handler == nil {
t.Fatalf("expected non-nil balance alias handler")
}
handler, err = buildPluginWorkerHandler("erasure_coding", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandler(erasure_coding) err = %v", err)
}
if handler == nil {
t.Fatalf("expected non-nil erasure_coding handler")
}
handler, err = buildPluginWorkerHandler("ec", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandler(ec alias) err = %v", err)
}
if handler == nil {
t.Fatalf("expected non-nil ec alias handler")
}
_, err = buildPluginWorkerHandler("unknown", dialOption)
if err == nil {
t.Fatalf("expected unsupported job type error")
}
}
func TestBuildPluginWorkerHandlers(t *testing.T) {
dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
handlers, err := buildPluginWorkerHandlers("vacuum,volume_balance,erasure_coding", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandlers(list) err = %v", err)
}
if len(handlers) != 3 {
t.Fatalf("expected 3 handlers, got %d", len(handlers))
}
handlers, err = buildPluginWorkerHandlers("balance,ec,vacuum,balance", dialOption)
if err != nil {
t.Fatalf("buildPluginWorkerHandlers(aliases) err = %v", err)
}
if len(handlers) != 3 {
t.Fatalf("expected deduped 3 handlers, got %d", len(handlers))
}
_, err = buildPluginWorkerHandlers("unknown,vacuum", dialOption)
if err == nil {
t.Fatalf("expected unsupported job type error")
}
}
func TestParsePluginWorkerJobTypes(t *testing.T) {
jobTypes, err := parsePluginWorkerJobTypes("")
if err != nil {
t.Fatalf("parsePluginWorkerJobTypes(default) err = %v", err)
}
if len(jobTypes) != 1 || jobTypes[0] != "vacuum" {
t.Fatalf("expected default [vacuum], got %v", jobTypes)
}
jobTypes, err = parsePluginWorkerJobTypes(" volume_balance , ec , vacuum , volume_balance ")
if err != nil {
t.Fatalf("parsePluginWorkerJobTypes(list) err = %v", err)
}
if len(jobTypes) != 3 {
t.Fatalf("expected 3 deduped job types, got %d (%v)", len(jobTypes), jobTypes)
}
if jobTypes[0] != "volume_balance" || jobTypes[1] != "erasure_coding" || jobTypes[2] != "vacuum" {
t.Fatalf("unexpected parsed order %v", jobTypes)
}
if _, err = parsePluginWorkerJobTypes(" , "); err != nil {
t.Fatalf("expected empty list to resolve to default vacuum: %v", err)
}
}
func TestPluginWorkerDefaultJobTypes(t *testing.T) {
jobTypes, err := parsePluginWorkerJobTypes(defaultPluginWorkerJobTypes)
if err != nil {
t.Fatalf("parsePluginWorkerJobTypes(default setting) err = %v", err)
}
if len(jobTypes) != 3 {
t.Fatalf("expected default job types to include 3 handlers, got %v", jobTypes)
}
}
func TestResolvePluginWorkerID(t *testing.T) {
dir := t.TempDir()
explicit, err := resolvePluginWorkerID("worker-x", dir)
if err != nil {
t.Fatalf("resolvePluginWorkerID(explicit) err = %v", err)
}
if explicit != "worker-x" {
t.Fatalf("expected explicit id, got %q", explicit)
}
generated, err := resolvePluginWorkerID("", dir)
if err != nil {
t.Fatalf("resolvePluginWorkerID(generate) err = %v", err)
}
if generated == "" {
t.Fatalf("expected generated id")
}
if len(generated) < 7 || generated[:7] != "plugin-" {
t.Fatalf("expected generated id prefix plugin-, got %q", generated)
}
persistedPath := filepath.Join(dir, "plugin.worker.id")
if _, statErr := os.Stat(persistedPath); statErr != nil {
t.Fatalf("expected persisted worker id file: %v", statErr)
}
reused, err := resolvePluginWorkerID("", dir)
if err != nil {
t.Fatalf("resolvePluginWorkerID(reuse) err = %v", err)
}
if reused != generated {
t.Fatalf("expected reused id %q, got %q", generated, reused)
}
}
func TestParsePluginWorkerAdminAddress(t *testing.T) {
host, httpPort, hasExplicitGrpcPort, err := parsePluginWorkerAdminAddress("localhost:23646")
if err != nil {
t.Fatalf("parsePluginWorkerAdminAddress(localhost:23646) err = %v", err)
}
if host != "localhost" || httpPort != 23646 || hasExplicitGrpcPort {
t.Fatalf("unexpected parse result: host=%q httpPort=%d hasExplicit=%v", host, httpPort, hasExplicitGrpcPort)
}
host, httpPort, hasExplicitGrpcPort, err = parsePluginWorkerAdminAddress("localhost:23646.33646")
if err != nil {
t.Fatalf("parsePluginWorkerAdminAddress(localhost:23646.33646) err = %v", err)
}
if host != "localhost" || httpPort != 23646 || !hasExplicitGrpcPort {
t.Fatalf("unexpected dotted parse result: host=%q httpPort=%d hasExplicit=%v", host, httpPort, hasExplicitGrpcPort)
}
if _, _, _, err = parsePluginWorkerAdminAddress("localhost"); err == nil {
t.Fatalf("expected parse error for invalid address")
}
}
func TestResolvePluginWorkerAdminServerUsesStatusGrpcPort(t *testing.T) {
const grpcPort = 35432
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/api/plugin/status" {
http.NotFound(w, r)
return
}
_, _ = w.Write([]byte(fmt.Sprintf(`{"worker_grpc_port":%d}`, grpcPort)))
}))
defer server.Close()
adminAddress := strings.TrimPrefix(server.URL, "http://")
host, httpPort, _, err := parsePluginWorkerAdminAddress(adminAddress)
if err != nil {
t.Fatalf("parsePluginWorkerAdminAddress(%s) err = %v", adminAddress, err)
}
resolved := resolvePluginWorkerAdminServer(adminAddress)
expected := fmt.Sprintf("%s:%d.%d", host, httpPort, grpcPort)
if resolved != expected {
t.Fatalf("unexpected resolved admin address: got=%q want=%q", resolved, expected)
}
}
func TestResolvePluginWorkerAdminServerKeepsDefaultGrpcOffset(t *testing.T) {
var server *httptest.Server
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/api/plugin/status" {
http.NotFound(w, r)
return
}
address := strings.TrimPrefix(server.URL, "http://")
_, httpPort, _, parseErr := parsePluginWorkerAdminAddress(address)
if parseErr != nil {
http.Error(w, parseErr.Error(), http.StatusInternalServerError)
return
}
_, _ = w.Write([]byte(fmt.Sprintf(`{"worker_grpc_port":%d}`, httpPort+10000)))
}))
defer server.Close()
adminAddress := strings.TrimPrefix(server.URL, "http://")
resolved := resolvePluginWorkerAdminServer(adminAddress)
if resolved != adminAddress {
t.Fatalf("expected admin address to remain unchanged, got=%q want=%q", resolved, adminAddress)
}
}


@@ -1,76 +1,54 @@
package command
import (
"net/http"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/security"
statsCollect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/seaweedfs/seaweedfs/weed/worker"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
// Import task packages to trigger their auto-registration
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
// TODO: Implement additional task packages (add to default capabilities when ready):
// _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/remote" - for uploading volumes to remote/cloud storage
// _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/replication" - for fixing replication issues and maintaining data consistency
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var cmdWorker = &Command{
UsageLine: "worker -admin=<admin_server> [-capabilities=<task_types>] [-maxConcurrent=<num>] [-workingDir=<path>] [-metricsPort=<port>] [-debug]",
Short: "start a maintenance worker to process cluster maintenance tasks",
Long: `Start a maintenance worker that connects to an admin server to process
maintenance tasks like vacuum, erasure coding, remote upload, and replication fixes.
UsageLine: "worker -admin=<admin_server> [-id=<worker_id>] [-jobType=vacuum,volume_balance,erasure_coding] [-workingDir=<path>] [-heartbeat=15s] [-reconnect=5s] [-maxDetect=1] [-maxExecute=4] [-metricsPort=<port>] [-metricsIp=<ip>] [-debug]",
Short: "start a plugin.proto worker process",
Long: `Start an external plugin worker using weed/pb/plugin.proto over gRPC.
The worker ID and address are automatically generated.
The worker connects to the admin server via gRPC (admin HTTP port + 10000).
This command provides vacuum, volume_balance, and erasure_coding job type
contracts with the plugin stream runtime, including descriptor delivery,
heartbeat/load reporting, detection, and execution.
Behavior:
- Use -jobType to choose one or more plugin job handlers (comma-separated list)
- Use -workingDir to persist plugin.worker.id for stable worker identity across restarts
- Use -metricsPort/-metricsIp to expose /health, /ready, and /metrics
Examples:
weed worker -admin=localhost:23646
weed worker -admin=admin.example.com:23646
weed worker -admin=localhost:23646 -capabilities=vacuum,replication
weed worker -admin=localhost:23646 -maxConcurrent=4
weed worker -admin=localhost:23646 -workingDir=/tmp/worker
weed worker -admin=localhost:23646 -metricsPort=9327
weed worker -admin=localhost:23646 -debug -debug.port=6060
weed worker -admin=localhost:23646 -jobType=volume_balance
weed worker -admin=localhost:23646 -jobType=vacuum,volume_balance
weed worker -admin=localhost:23646 -jobType=erasure_coding
weed worker -admin=admin.example.com:23646 -id=plugin-vacuum-a -heartbeat=10s
weed worker -admin=localhost:23646 -workingDir=/var/lib/seaweedfs-plugin
weed worker -admin=localhost:23646 -metricsPort=9327 -metricsIp=0.0.0.0
`,
}
var (
workerAdminServer = cmdWorker.Flag.String("admin", "localhost:23646", "admin server address")
workerCapabilities = cmdWorker.Flag.String("capabilities", "vacuum,ec,balance", "comma-separated list of task types this worker can handle")
workerMaxConcurrent = cmdWorker.Flag.Int("maxConcurrent", 2, "maximum number of concurrent tasks")
workerHeartbeatInterval = cmdWorker.Flag.Duration("heartbeat", 30*time.Second, "heartbeat interval")
workerTaskRequestInterval = cmdWorker.Flag.Duration("taskInterval", 5*time.Second, "task request interval")
workerWorkingDir = cmdWorker.Flag.String("workingDir", "", "working directory for the worker")
workerMetricsPort = cmdWorker.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
workerMetricsIp = cmdWorker.Flag.String("metricsIp", "0.0.0.0", "Prometheus metrics listen IP")
workerDebug = cmdWorker.Flag.Bool("debug", false, "serves runtime profiling data via pprof on the port specified by -debug.port")
workerDebugPort = cmdWorker.Flag.Int("debug.port", 6060, "http port for debugging")
workerServerHeader = "SeaweedFS Worker " + version.VERSION
workerAdminServer = cmdWorker.Flag.String("admin", "localhost:23646", "admin server address")
workerID = cmdWorker.Flag.String("id", "", "worker ID (auto-generated when empty)")
workerWorkingDir = cmdWorker.Flag.String("workingDir", "", "working directory for persistent worker state")
workerJobType = cmdWorker.Flag.String("jobType", defaultPluginWorkerJobTypes, "job types to serve (comma-separated list)")
workerHeartbeat = cmdWorker.Flag.Duration("heartbeat", 15*time.Second, "heartbeat interval")
workerReconnect = cmdWorker.Flag.Duration("reconnect", 5*time.Second, "reconnect delay")
workerMaxDetect = cmdWorker.Flag.Int("maxDetect", 1, "max concurrent detection requests")
workerMaxExecute = cmdWorker.Flag.Int("maxExecute", 4, "max concurrent execute requests")
workerAddress = cmdWorker.Flag.String("address", "", "worker address advertised to admin")
workerMetricsPort = cmdWorker.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
workerMetricsIp = cmdWorker.Flag.String("metricsIp", "0.0.0.0", "Prometheus metrics listen IP")
workerDebug = cmdWorker.Flag.Bool("debug", false, "serves runtime profiling data via pprof on the port specified by -debug.port")
workerDebugPort = cmdWorker.Flag.Int("debug.port", 6060, "http port for debugging")
)
func init() {
cmdWorker.Run = runWorker
// Set default capabilities from registered task types
// This happens after package imports have triggered auto-registration
tasks.SetDefaultCapabilitiesFromRegistry()
}
func runWorker(cmd *Command, args []string) bool {
@@ -78,218 +56,17 @@ func runWorker(cmd *Command, args []string) bool {
grace.StartDebugServer(*workerDebugPort)
}
util.LoadConfiguration("security", false)
glog.Infof("Starting maintenance worker")
glog.Infof("Admin server: %s", *workerAdminServer)
glog.Infof("Capabilities: %s", *workerCapabilities)
// Parse capabilities
capabilities := parseCapabilities(*workerCapabilities)
if len(capabilities) == 0 {
glog.Fatalf("No valid capabilities specified")
return false
}
// Set working directory and create task-specific subdirectories
var baseWorkingDir string
if *workerWorkingDir != "" {
glog.Infof("Setting working directory to: %s", *workerWorkingDir)
if err := os.Chdir(*workerWorkingDir); err != nil {
glog.Fatalf("Failed to change working directory: %v", err)
return false
}
wd, err := os.Getwd()
if err != nil {
glog.Fatalf("Failed to get working directory: %v", err)
return false
}
baseWorkingDir = wd
glog.Infof("Current working directory: %s", baseWorkingDir)
} else {
// Use default working directory when not specified
wd, err := os.Getwd()
if err != nil {
glog.Fatalf("Failed to get current working directory: %v", err)
return false
}
baseWorkingDir = wd
glog.Infof("Using current working directory: %s", baseWorkingDir)
}
// Create task-specific subdirectories
for _, capability := range capabilities {
taskDir := filepath.Join(baseWorkingDir, string(capability))
if err := os.MkdirAll(taskDir, 0755); err != nil {
glog.Fatalf("Failed to create task directory %s: %v", taskDir, err)
return false
}
glog.Infof("Created task directory: %s", taskDir)
}
// Create gRPC dial option using TLS configuration
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
// Create worker configuration
config := &types.WorkerConfig{
AdminServer: *workerAdminServer,
Capabilities: capabilities,
MaxConcurrent: *workerMaxConcurrent,
HeartbeatInterval: *workerHeartbeatInterval,
TaskRequestInterval: *workerTaskRequestInterval,
BaseWorkingDir: baseWorkingDir,
GrpcDialOption: grpcDialOption,
}
// Create worker instance
workerInstance, err := worker.NewWorker(config)
if err != nil {
glog.Fatalf("Failed to create worker: %v", err)
return false
}
adminClient, err := worker.CreateAdminClient(*workerAdminServer, workerInstance.ID(), grpcDialOption)
if err != nil {
glog.Fatalf("Failed to create admin client: %v", err)
return false
}
// Set admin client
workerInstance.SetAdminClient(adminClient)
// Set working directory
if *workerWorkingDir != "" {
glog.Infof("Setting working directory to: %s", *workerWorkingDir)
if err := os.Chdir(*workerWorkingDir); err != nil {
glog.Fatalf("Failed to change working directory: %v", err)
return false
}
wd, err := os.Getwd()
if err != nil {
glog.Fatalf("Failed to get working directory: %v", err)
return false
}
glog.Infof("Current working directory: %s", wd)
}
// Start metrics HTTP server if port is specified
if *workerMetricsPort > 0 {
go startWorkerMetricsServer(*workerMetricsIp, *workerMetricsPort, workerInstance)
}
// Start the worker
err = workerInstance.Start()
if err != nil {
glog.Errorf("Failed to start worker: %v", err)
return false
}
// Set up signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
glog.Infof("Maintenance worker %s started successfully", workerInstance.ID())
glog.Infof("Press Ctrl+C to stop the worker")
// Wait for shutdown signal
<-sigChan
glog.Infof("Shutdown signal received, stopping worker...")
// Gracefully stop the worker
err = workerInstance.Stop()
if err != nil {
glog.Errorf("Error stopping worker: %v", err)
}
glog.Infof("Worker stopped")
return true
}
// parseCapabilities converts comma-separated capability string to task types
func parseCapabilities(capabilityStr string) []types.TaskType {
if capabilityStr == "" {
return nil
}
capabilityMap := map[string]types.TaskType{}
// Populate capabilityMap with registered task types
typesRegistry := tasks.GetGlobalTypesRegistry()
for taskType := range typesRegistry.GetAllDetectors() {
// Use the task type string directly as the key
capabilityMap[strings.ToLower(string(taskType))] = taskType
}
// Add common aliases for convenience
if taskType, exists := capabilityMap["erasure_coding"]; exists {
capabilityMap["ec"] = taskType
}
if taskType, exists := capabilityMap["remote_upload"]; exists {
capabilityMap["remote"] = taskType
}
if taskType, exists := capabilityMap["fix_replication"]; exists {
capabilityMap["replication"] = taskType
}
var capabilities []types.TaskType
parts := strings.Split(capabilityStr, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if taskType, exists := capabilityMap[part]; exists {
capabilities = append(capabilities, taskType)
} else {
glog.Warningf("Unknown capability: %s", part)
}
}
return capabilities
}
// Legacy compatibility types for backward compatibility
// These will be deprecated in future versions
// WorkerStatus represents the current status of a worker (deprecated)
type WorkerStatus struct {
WorkerID string `json:"worker_id"`
Address string `json:"address"`
Status string `json:"status"`
Capabilities []types.TaskType `json:"capabilities"`
MaxConcurrent int `json:"max_concurrent"`
CurrentLoad int `json:"current_load"`
LastHeartbeat time.Time `json:"last_heartbeat"`
CurrentTasks []types.Task `json:"current_tasks"`
Uptime time.Duration `json:"uptime"`
TasksCompleted int `json:"tasks_completed"`
TasksFailed int `json:"tasks_failed"`
}
func workerHealthHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", workerServerHeader)
w.WriteHeader(http.StatusOK)
}
func workerReadyHandler(workerInstance *worker.Worker) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", workerServerHeader)
admin := workerInstance.GetAdmin()
if admin == nil || !admin.IsConnected() {
w.WriteHeader(http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
}
}
func startWorkerMetricsServer(ip string, port int, w *worker.Worker) {
mux := http.NewServeMux()
mux.HandleFunc("/health", workerHealthHandler)
mux.HandleFunc("/ready", workerReadyHandler(w))
mux.Handle("/metrics", promhttp.HandlerFor(statsCollect.Gather, promhttp.HandlerOpts{}))
glog.V(0).Infof("Starting worker metrics server at %s", statsCollect.JoinHostPort(ip, port))
if err := http.ListenAndServe(statsCollect.JoinHostPort(ip, port), mux); err != nil {
glog.Errorf("Worker metrics server failed to start: %v", err)
}
return runPluginWorkerWithOptions(pluginWorkerRunOptions{
AdminServer: *workerAdminServer,
WorkerID: *workerID,
WorkingDir: *workerWorkingDir,
JobTypes: *workerJobType,
Heartbeat: *workerHeartbeat,
Reconnect: *workerReconnect,
MaxDetect: *workerMaxDetect,
MaxExecute: *workerMaxExecute,
Address: *workerAddress,
MetricsPort: *workerMetricsPort,
MetricsIP: *workerMetricsIp,
})
}


@@ -0,0 +1,348 @@
package command
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/seaweedfs/seaweedfs/weed/glog"
pluginworker "github.com/seaweedfs/seaweedfs/weed/plugin/worker"
"github.com/seaweedfs/seaweedfs/weed/security"
statsCollect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"google.golang.org/grpc"
)
const defaultPluginWorkerJobTypes = "vacuum,volume_balance,erasure_coding"
type pluginWorkerRunOptions struct {
AdminServer string
WorkerID string
WorkingDir string
JobTypes string
Heartbeat time.Duration
Reconnect time.Duration
MaxDetect int
MaxExecute int
Address string
MetricsPort int
MetricsIP string
}
func runPluginWorkerWithOptions(options pluginWorkerRunOptions) bool {
util.LoadConfiguration("security", false)
options.AdminServer = strings.TrimSpace(options.AdminServer)
if options.AdminServer == "" {
options.AdminServer = "localhost:23646"
}
options.JobTypes = strings.TrimSpace(options.JobTypes)
if options.JobTypes == "" {
options.JobTypes = defaultPluginWorkerJobTypes
}
if options.Heartbeat <= 0 {
options.Heartbeat = 15 * time.Second
}
if options.Reconnect <= 0 {
options.Reconnect = 5 * time.Second
}
if options.MaxDetect <= 0 {
options.MaxDetect = 1
}
if options.MaxExecute <= 0 {
options.MaxExecute = 4
}
options.MetricsIP = strings.TrimSpace(options.MetricsIP)
if options.MetricsIP == "" {
options.MetricsIP = "0.0.0.0"
}
resolvedAdminServer := resolvePluginWorkerAdminServer(options.AdminServer)
if resolvedAdminServer != options.AdminServer {
fmt.Printf("Resolved admin worker gRPC endpoint: %s -> %s\n", options.AdminServer, resolvedAdminServer)
}
dialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
workerID, err := resolvePluginWorkerID(options.WorkerID, options.WorkingDir)
if err != nil {
glog.Errorf("Failed to resolve plugin worker ID: %v", err)
return false
}
handlers, err := buildPluginWorkerHandlers(options.JobTypes, dialOption)
if err != nil {
glog.Errorf("Failed to build plugin worker handlers: %v", err)
return false
}
worker, err := pluginworker.NewWorker(pluginworker.WorkerOptions{
AdminServer: resolvedAdminServer,
WorkerID: workerID,
WorkerVersion: version.Version(),
WorkerAddress: options.Address,
HeartbeatInterval: options.Heartbeat,
ReconnectDelay: options.Reconnect,
MaxDetectionConcurrency: options.MaxDetect,
MaxExecutionConcurrency: options.MaxExecute,
GrpcDialOption: dialOption,
Handlers: handlers,
})
if err != nil {
glog.Errorf("Failed to create plugin worker: %v", err)
return false
}
if options.MetricsPort > 0 {
go startPluginWorkerMetricsServer(options.MetricsIP, options.MetricsPort, worker)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(sigCh)
go func() {
sig := <-sigCh
fmt.Printf("\nReceived signal %v, stopping plugin worker...\n", sig)
cancel()
}()
fmt.Printf("Starting plugin worker (admin=%s)\n", resolvedAdminServer)
if err := worker.Run(ctx); err != nil {
glog.Errorf("Plugin worker stopped with error: %v", err)
return false
}
fmt.Println("Plugin worker stopped")
return true
}
func resolvePluginWorkerID(explicitID string, workingDir string) (string, error) {
id := strings.TrimSpace(explicitID)
if id != "" {
return id, nil
}
workingDir = strings.TrimSpace(workingDir)
if workingDir == "" {
return "", nil
}
if err := os.MkdirAll(workingDir, 0755); err != nil {
return "", err
}
workerIDPath := filepath.Join(workingDir, "plugin.worker.id")
if data, err := os.ReadFile(workerIDPath); err == nil {
if persisted := strings.TrimSpace(string(data)); persisted != "" {
return persisted, nil
}
}
generated := fmt.Sprintf("plugin-%d", time.Now().UnixNano())
if err := os.WriteFile(workerIDPath, []byte(generated+"\n"), 0644); err != nil {
return "", err
}
return generated, nil
}
func buildPluginWorkerHandler(jobType string, dialOption grpc.DialOption) (pluginworker.JobHandler, error) {
canonicalJobType, err := canonicalPluginWorkerJobType(jobType)
if err != nil {
return nil, err
}
switch canonicalJobType {
case "vacuum":
return pluginworker.NewVacuumHandler(dialOption), nil
case "volume_balance":
return pluginworker.NewVolumeBalanceHandler(dialOption), nil
case "erasure_coding":
return pluginworker.NewErasureCodingHandler(dialOption), nil
default:
return nil, fmt.Errorf("unsupported plugin job type %q", canonicalJobType)
}
}
func buildPluginWorkerHandlers(jobTypes string, dialOption grpc.DialOption) ([]pluginworker.JobHandler, error) {
parsedJobTypes, err := parsePluginWorkerJobTypes(jobTypes)
if err != nil {
return nil, err
}
handlers := make([]pluginworker.JobHandler, 0, len(parsedJobTypes))
for _, jobType := range parsedJobTypes {
handler, buildErr := buildPluginWorkerHandler(jobType, dialOption)
if buildErr != nil {
return nil, buildErr
}
handlers = append(handlers, handler)
}
return handlers, nil
}
func parsePluginWorkerJobTypes(jobTypes string) ([]string, error) {
jobTypes = strings.TrimSpace(jobTypes)
if jobTypes == "" {
return []string{"vacuum"}, nil
}
parts := strings.Split(jobTypes, ",")
parsed := make([]string, 0, len(parts))
seen := make(map[string]struct{}, len(parts))
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "" {
continue
}
canonical, err := canonicalPluginWorkerJobType(part)
if err != nil {
return nil, err
}
if _, found := seen[canonical]; found {
continue
}
seen[canonical] = struct{}{}
parsed = append(parsed, canonical)
}
if len(parsed) == 0 {
return []string{"vacuum"}, nil
}
return parsed, nil
}
func canonicalPluginWorkerJobType(jobType string) (string, error) {
switch strings.ToLower(strings.TrimSpace(jobType)) {
case "", "vacuum":
return "vacuum", nil
case "volume_balance", "balance", "volume.balance", "volume-balance":
return "volume_balance", nil
case "erasure_coding", "erasure-coding", "erasure.coding", "ec":
return "erasure_coding", nil
default:
return "", fmt.Errorf("unsupported plugin job type %q", jobType)
}
}
func resolvePluginWorkerAdminServer(adminServer string) string {
adminServer = strings.TrimSpace(adminServer)
host, httpPort, hasExplicitGrpcPort, err := parsePluginWorkerAdminAddress(adminServer)
if err != nil || hasExplicitGrpcPort {
return adminServer
}
workerGrpcPort, err := fetchPluginWorkerGrpcPort(host, httpPort)
if err != nil || workerGrpcPort <= 0 {
return adminServer
}
// Keep canonical host:http form when admin gRPC follows the default +10000 rule.
if workerGrpcPort == httpPort+10000 {
return adminServer
}
return fmt.Sprintf("%s:%d.%d", host, httpPort, workerGrpcPort)
}
func parsePluginWorkerAdminAddress(adminServer string) (host string, httpPort int, hasExplicitGrpcPort bool, err error) {
adminServer = strings.TrimSpace(adminServer)
colonIndex := strings.LastIndex(adminServer, ":")
if colonIndex <= 0 || colonIndex >= len(adminServer)-1 {
return "", 0, false, fmt.Errorf("invalid admin address %q", adminServer)
}
host = adminServer[:colonIndex]
portPart := adminServer[colonIndex+1:]
if dotIndex := strings.LastIndex(portPart, "."); dotIndex > 0 && dotIndex < len(portPart)-1 {
if _, parseErr := strconv.Atoi(portPart[dotIndex+1:]); parseErr == nil {
hasExplicitGrpcPort = true
portPart = portPart[:dotIndex]
}
}
httpPort, err = strconv.Atoi(portPart)
if err != nil || httpPort <= 0 {
return "", 0, false, fmt.Errorf("invalid admin http port in %q", adminServer)
}
return host, httpPort, hasExplicitGrpcPort, nil
}
func fetchPluginWorkerGrpcPort(host string, httpPort int) (int, error) {
client := &http.Client{Timeout: 2 * time.Second}
address := util.JoinHostPort(host, httpPort)
var lastErr error
for _, scheme := range []string{"http", "https"} {
statusURL := fmt.Sprintf("%s://%s/api/plugin/status", scheme, address)
resp, err := client.Get(statusURL)
if err != nil {
lastErr = err
continue
}
var payload struct {
WorkerGrpcPort int `json:"worker_grpc_port"`
}
decodeErr := json.NewDecoder(resp.Body).Decode(&payload)
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
lastErr = fmt.Errorf("status code %d from %s", resp.StatusCode, statusURL)
continue
}
if decodeErr != nil {
lastErr = fmt.Errorf("decode plugin status from %s: %w", statusURL, decodeErr)
continue
}
if payload.WorkerGrpcPort <= 0 {
lastErr = fmt.Errorf("plugin status from %s returned empty worker_grpc_port", statusURL)
continue
}
return payload.WorkerGrpcPort, nil
}
if lastErr == nil {
lastErr = fmt.Errorf("plugin status endpoint unavailable")
}
return 0, lastErr
}
func pluginWorkerHealthHandler(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
}
func pluginWorkerReadyHandler(pluginRuntime *pluginworker.Worker) http.HandlerFunc {
return func(w http.ResponseWriter, _ *http.Request) {
if pluginRuntime == nil || !pluginRuntime.IsConnected() {
w.WriteHeader(http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
}
}
func startPluginWorkerMetricsServer(ip string, port int, pluginRuntime *pluginworker.Worker) {
mux := http.NewServeMux()
mux.HandleFunc("/health", pluginWorkerHealthHandler)
mux.HandleFunc("/ready", pluginWorkerReadyHandler(pluginRuntime))
mux.Handle("/metrics", promhttp.HandlerFor(statsCollect.Gather, promhttp.HandlerOpts{}))
glog.V(0).Infof("Starting plugin worker metrics server at %s", statsCollect.JoinHostPort(ip, port))
if err := http.ListenAndServe(statsCollect.JoinHostPort(ip, port), mux); err != nil {
glog.Errorf("Plugin worker metrics server failed to start: %v", err)
}
}


@@ -0,0 +1,13 @@
package command
import "testing"
func TestWorkerDefaultJobTypes(t *testing.T) {
jobTypes, err := parsePluginWorkerJobTypes(*workerJobType)
if err != nil {
t.Fatalf("parsePluginWorkerJobTypes(default worker flag) err = %v", err)
}
if len(jobTypes) != 3 {
t.Fatalf("expected default worker job types to include 3 handlers, got %v", jobTypes)
}
}