Refactor plugin system and migrate worker runtime (#8369)

* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fall back to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric, with color as the only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption
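
A generic html/template illustration of the buffering idea (the admin UI uses generated templates, so the actual wiring differs): render fully into memory first, so a late error can still produce a clean 500 instead of a truncated page.

    // renderBuffered writes the response only after the whole template rendered successfully.
    func renderBuffered(w http.ResponseWriter, t *template.Template, data any) {
        var buf bytes.Buffer
        if err := t.Execute(&buf, data); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        buf.WriteTo(w)
    }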

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
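
A minimal debounce sketch under assumed type and field names (the actual flush window is not taken from this change): repeated dirty marks within the window collapse into a single disk write.

    type debouncedFlusher struct {
        mu    sync.Mutex
        timer *time.Timer
        flush func() // persists the current snapshot to disk
    }

    func (d *debouncedFlusher) markDirty(window time.Duration) {
        d.mu.Lock()
        defer d.mu.Unlock()
        if d.timer == nil {
            d.timer = time.AfterFunc(window, d.flush)
            return
        }
        d.timer.Reset(window) // push the pending flush out again
    }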

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.
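
A minimal sketch of the seeded-PRNG payload fill described above (the function name and signature are illustrative, not the runner's actual code):

    import "math/rand"

    // fillBulkPayload fills buf with reproducible pseudo-random bytes.
    // math/rand is fast and seedable; it is not cryptographically secure,
    // which is acceptable for synthetic stress payloads.
    func fillBulkPayload(buf []byte, seed int64) {
        rng := rand.New(rand.NewSource(seed))
        rng.Read(buf) // (*rand.Rand).Read always fills buf and never returns an error
    }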

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.
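
A hedged illustration of the resulting status mapping (plain net/http shown here; the handler's actual plumbing differs), using the BuildJobDetail call exercised by the tests below:

    func getPluginJobDetail(w http.ResponseWriter, jobID string, pluginSvc *Plugin) {
        detail, found, err := pluginSvc.BuildJobDetail(jobID, 100, 0)
        if err != nil {
            // store/read failures are server-side problems, not bad client input
            http.Error(w, "load job detail: "+err.Error(), http.StatusInternalServerError)
            return
        }
        if !found {
            http.Error(w, "job not found", http.StatusNotFound)
            return
        }
        _ = detail // the real handler serializes detail to JSON here
    }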

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
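
A minimal sketch of the two pieces described above (receiver and field names are assumed; only the safeSendCh and shutdown-wait behavior come from this change):

    // safeSendCh sends v and recovers if the channel was closed during shutdown.
    func safeSendCh[T any](ch chan<- T, v T) (sent bool) {
        defer func() {
            if recover() != nil {
                sent = false // channel already closed; drop the value instead of panicking
            }
        }()
        ch <- v
        return true
    }

    // Shutdown closes the shutdown channel once and waits for background goroutines.
    func (p *Plugin) Shutdown() {
        p.shutdownOnce.Do(func() { close(p.shutdownCh) })
        p.wg.Wait()
    }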

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.
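
A sketch of the nil-safe time handling referred to above; timeToPtr matches the helper used by the tests below, while the comparison helper is illustrative only:

    func timeToPtr(t time.Time) *time.Time { return &t }

    // isAfter treats a missing timestamp as "not after" so sorting and trimming
    // never dereference a nil *time.Time.
    func isAfter(a, b *time.Time) bool {
        if a == nil || b == nil {
            return false
        }
        return a.After(*b)
    }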

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
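
A sketch of the shutdown check described above (receiver and type names are assumed; only the r.shutdownCh selection comes from this change):

    // isShuttingDown reports whether shutdown has been requested,
    // without relying on error string matching.
    func (r *Registry) isShuttingDown() bool {
        select {
        case <-r.shutdownCh: // closed when Shutdown runs
            return true
        default:
            return false
        }
    }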

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
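
A minimal sketch of the atomic-write behavior described above (helper shape assumed; the real function is atomicWriteFile and uses a defaultDirPerm constant for the directory mode):

    func writeFileAtomic(path string, data []byte, perm os.FileMode) error {
        dir := filepath.Dir(path)
        if err := os.MkdirAll(dir, 0o755); err != nil { // ensure the parent directory exists
            return err
        }
        tmp, err := os.CreateTemp(dir, ".tmp-*")
        if err != nil {
            return err
        }
        defer os.Remove(tmp.Name()) // best-effort cleanup if the rename never happens
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Close(); err != nil {
            return err
        }
        if err := os.Chmod(tmp.Name(), perm); err != nil {
            return err
        }
        return os.Rename(tmp.Name(), path) // atomic replace on the same filesystem
    }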

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.
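
A small illustration of deriving the shard id range from the canonical constant instead of a hard-coded 14 (the enumeration helper is hypothetical; the constant is assumed to live in weed/storage/erasure_coding):

    func expectedEcShardIDs() []int {
        ids := make([]int, 0, erasure_coding.TotalShardsCount)
        for id := 0; id < erasure_coding.TotalShardsCount; id++ {
            ids = append(ids, id)
        }
        return ids
    }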

* admin: stabilize plugin tests and fix regressions

- Rewrote plugin_monitor_test.go to robustly handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
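
A sketch of the safeSendDetectionComplete race fix mentioned above (field names are assumed): the channel reference is copied under the mutex, and the send happens after the lock is released.

    func (p *Plugin) safeSendDetectionComplete() {
        p.mu.Lock()
        ch := p.detectionCompleteCh
        p.mu.Unlock()
        if ch == nil {
            return
        }
        select {
        case ch <- struct{}{}:
        default: // nobody is waiting; do not block while holding no lock
        }
    }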

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
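
A hedged sketch of the 1MB cap (the real parseProtoJSONBody signature and handler wiring are assumptions):

    const maxPluginBodyBytes = 1 << 20 // 1 MB

    func parseProtoJSONBody(w http.ResponseWriter, r *http.Request, msg proto.Message) error {
        body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maxPluginBodyBytes))
        if err != nil {
            return err // returned when the client sends more than the cap allows
        }
        return protojson.Unmarshal(body, msg)
    }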

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID

* admin/plugin: fix racy Shutdown channel close with sync.Once

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Author: Chris Lu
Date: 2026-02-18 13:42:41 -08:00
Committed by: GitHub
Parent: 5463038760
Commit: 8ec9ff4a12
82 changed files with 23419 additions and 11389 deletions


@@ -0,0 +1,600 @@
package plugin
import (
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
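
// TestPluginLoadsPersistedMonitorStateOnStart verifies that tracked jobs and
// activities saved by a previous run are loaded from the config store on startup.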
func TestPluginLoadsPersistedMonitorStateOnStart(t *testing.T) {
t.Parallel()
dataDir := t.TempDir()
store, err := NewConfigStore(dataDir)
if err != nil {
t.Fatalf("NewConfigStore: %v", err)
}
seedJobs := []TrackedJob{
{
JobID: "job-seeded",
JobType: "vacuum",
State: "running",
CreatedAt: timeToPtr(time.Now().UTC().Add(-2 * time.Minute)),
UpdatedAt: timeToPtr(time.Now().UTC().Add(-1 * time.Minute)),
},
}
seedActivities := []JobActivity{
{
JobID: "job-seeded",
JobType: "vacuum",
Source: "worker_progress",
Message: "seeded",
OccurredAt: timeToPtr(time.Now().UTC().Add(-30 * time.Second)),
},
}
if err := store.SaveTrackedJobs(seedJobs); err != nil {
t.Fatalf("SaveTrackedJobs: %v", err)
}
if err := store.SaveActivities(seedActivities); err != nil {
t.Fatalf("SaveActivities: %v", err)
}
pluginSvc, err := New(Options{DataDir: dataDir})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
gotJobs := pluginSvc.ListTrackedJobs("", "", 0)
if len(gotJobs) != 1 || gotJobs[0].JobID != "job-seeded" {
t.Fatalf("unexpected loaded jobs: %+v", gotJobs)
}
gotActivities := pluginSvc.ListActivities("", 0)
if len(gotActivities) != 1 || gotActivities[0].Message != "seeded" {
t.Fatalf("unexpected loaded activities: %+v", gotActivities)
}
}
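
// TestPluginPersistsMonitorStateAfterJobUpdates verifies that job start and
// completion updates are flushed to disk and can be re-read via the config store.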
func TestPluginPersistsMonitorStateAfterJobUpdates(t *testing.T) {
t.Parallel()
dataDir := t.TempDir()
pluginSvc, err := New(Options{DataDir: dataDir})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
job := &plugin_pb.JobSpec{
JobId: "job-persist",
JobType: "vacuum",
Summary: "persist test",
}
pluginSvc.trackExecutionStart("req-persist", "worker-a", job, 1)
pluginSvc.trackExecutionCompletion(&plugin_pb.JobCompleted{
RequestId: "req-persist",
JobId: "job-persist",
JobType: "vacuum",
Success: true,
Result: &plugin_pb.JobResult{Summary: "done"},
CompletedAt: timestamppb.New(time.Now().UTC()),
})
pluginSvc.Shutdown()
store, err := NewConfigStore(dataDir)
if err != nil {
t.Fatalf("NewConfigStore: %v", err)
}
trackedJobs, err := store.LoadTrackedJobs()
if err != nil {
t.Fatalf("LoadTrackedJobs: %v", err)
}
if len(trackedJobs) == 0 {
t.Fatalf("expected persisted tracked jobs")
}
found := false
for _, tracked := range trackedJobs {
if tracked.JobID == "job-persist" {
found = true
if tracked.State == "" {
t.Fatalf("persisted job state should not be empty")
}
}
}
if !found {
t.Fatalf("persisted tracked jobs missing job-persist")
}
activities, err := store.LoadActivities()
if err != nil {
t.Fatalf("LoadActivities: %v", err)
}
if len(activities) == 0 {
t.Fatalf("expected persisted activities")
}
}
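
// TestTrackExecutionQueuedMarksPendingState verifies that a queued job is tracked
// in the pending state with a "queued" stage and an admin_scheduler activity.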
func TestTrackExecutionQueuedMarksPendingState(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
pluginSvc.trackExecutionQueued(&plugin_pb.JobSpec{
JobId: "job-pending-1",
JobType: "vacuum",
DedupeKey: "vacuum:1",
Summary: "pending queue item",
})
jobs := pluginSvc.ListTrackedJobs("vacuum", "", 10)
if len(jobs) != 1 {
t.Fatalf("expected one tracked pending job, got=%d", len(jobs))
}
job := jobs[0]
if job.JobID != "job-pending-1" {
t.Fatalf("unexpected pending job id: %s", job.JobID)
}
if job.State != "job_state_pending" {
t.Fatalf("unexpected pending job state: %s", job.State)
}
if job.Stage != "queued" {
t.Fatalf("unexpected pending job stage: %s", job.Stage)
}
activities := pluginSvc.ListActivities("vacuum", 50)
found := false
for _, activity := range activities {
if activity.JobID == "job-pending-1" && activity.Stage == "queued" && activity.Source == "admin_scheduler" {
found = true
break
}
}
if !found {
t.Fatalf("expected queued activity for pending job")
}
}
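
// TestHandleJobProgressUpdateCarriesWorkerIDInActivities verifies that progress
// updates and their embedded worker events keep the reporting worker's id.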
func TestHandleJobProgressUpdateCarriesWorkerIDInActivities(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
job := &plugin_pb.JobSpec{
JobId: "job-progress-worker",
JobType: "vacuum",
}
pluginSvc.trackExecutionStart("req-progress-worker", "worker-a", job, 1)
pluginSvc.handleJobProgressUpdate("worker-a", &plugin_pb.JobProgressUpdate{
RequestId: "req-progress-worker",
JobId: "job-progress-worker",
JobType: "vacuum",
State: plugin_pb.JobState_JOB_STATE_RUNNING,
ProgressPercent: 42.0,
Stage: "scan",
Message: "in progress",
Activities: []*plugin_pb.ActivityEvent{
{
Source: plugin_pb.ActivitySource_ACTIVITY_SOURCE_EXECUTOR,
Message: "volume scanned",
Stage: "scan",
},
},
})
activities := pluginSvc.ListActivities("vacuum", 0)
if len(activities) == 0 {
t.Fatalf("expected activity entries")
}
foundProgress := false
foundEvent := false
for _, activity := range activities {
if activity.Source == "worker_progress" && activity.Message == "in progress" {
foundProgress = true
if activity.WorkerID != "worker-a" {
t.Fatalf("worker_progress activity worker mismatch: got=%q want=%q", activity.WorkerID, "worker-a")
}
}
if activity.Message == "volume scanned" {
foundEvent = true
if activity.WorkerID != "worker-a" {
t.Fatalf("worker event worker mismatch: got=%q want=%q", activity.WorkerID, "worker-a")
}
}
}
if !foundProgress {
t.Fatalf("expected worker_progress activity")
}
if !foundEvent {
t.Fatalf("expected worker activity event")
}
}
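
// TestHandleJobProgressUpdateWithoutJobIDTracksDetectionActivities verifies that
// detection-phase updates without a job id are still recorded as activities
// attributed to the detector worker.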
func TestHandleJobProgressUpdateWithoutJobIDTracksDetectionActivities(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
pluginSvc.handleJobProgressUpdate("worker-detector", &plugin_pb.JobProgressUpdate{
RequestId: "detect-req-1",
JobType: "vacuum",
State: plugin_pb.JobState_JOB_STATE_RUNNING,
Stage: "decision_summary",
Message: "VACUUM: No tasks created for 3 volumes",
Activities: []*plugin_pb.ActivityEvent{
{
Source: plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR,
Stage: "decision_summary",
Message: "VACUUM: No tasks created for 3 volumes",
},
},
})
activities := pluginSvc.ListActivities("vacuum", 0)
if len(activities) == 0 {
t.Fatalf("expected activity entries")
}
foundDetectionProgress := false
foundDetectorEvent := false
for _, activity := range activities {
if activity.RequestID != "detect-req-1" {
continue
}
if activity.Source == "worker_detection" {
foundDetectionProgress = true
if activity.WorkerID != "worker-detector" {
t.Fatalf("worker_detection worker mismatch: got=%q want=%q", activity.WorkerID, "worker-detector")
}
}
if activity.Source == "activity_source_detector" {
foundDetectorEvent = true
if activity.WorkerID != "worker-detector" {
t.Fatalf("detector event worker mismatch: got=%q want=%q", activity.WorkerID, "worker-detector")
}
}
}
if !foundDetectionProgress {
t.Fatalf("expected worker_detection activity")
}
if !foundDetectorEvent {
t.Fatalf("expected detector activity event")
}
}
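
// TestHandleJobCompletedCarriesWorkerIDInActivitiesAndRunHistory verifies that
// completion events carry the worker id into both activities and run history.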
func TestHandleJobCompletedCarriesWorkerIDInActivitiesAndRunHistory(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
job := &plugin_pb.JobSpec{
JobId: "job-complete-worker",
JobType: "vacuum",
}
pluginSvc.trackExecutionStart("req-complete-worker", "worker-b", job, 1)
pluginSvc.handleJobCompleted(&plugin_pb.JobCompleted{
RequestId: "req-complete-worker",
JobId: "job-complete-worker",
JobType: "vacuum",
Success: true,
Activities: []*plugin_pb.ActivityEvent{
{
Source: plugin_pb.ActivitySource_ACTIVITY_SOURCE_EXECUTOR,
Message: "finalizer done",
Stage: "finalize",
},
},
CompletedAt: timestamppb.Now(),
})
pluginSvc.Shutdown()
activities := pluginSvc.ListActivities("vacuum", 0)
foundWorkerEvent := false
for _, activity := range activities {
if activity.Message == "finalizer done" {
foundWorkerEvent = true
if activity.WorkerID != "worker-b" {
t.Fatalf("worker completion event worker mismatch: got=%q want=%q", activity.WorkerID, "worker-b")
}
}
}
if !foundWorkerEvent {
t.Fatalf("expected completion worker event activity")
}
history, err := pluginSvc.LoadRunHistory("vacuum")
if err != nil {
t.Fatalf("LoadRunHistory: %v", err)
}
if history == nil || len(history.SuccessfulRuns) == 0 {
t.Fatalf("expected successful run history entry")
}
if history.SuccessfulRuns[0].WorkerID != "worker-b" {
t.Fatalf("run history worker mismatch: got=%q want=%q", history.SuccessfulRuns[0].WorkerID, "worker-b")
}
}
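
// TestTrackExecutionStartStoresJobPayloadDetails verifies that heavy payload
// fields (detail, labels, parameters) are stripped from the in-memory tracked job
// but remain available through the disk-backed job detail.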
func TestTrackExecutionStartStoresJobPayloadDetails(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{DataDir: t.TempDir()})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
pluginSvc.trackExecutionStart("req-payload", "worker-c", &plugin_pb.JobSpec{
JobId: "job-payload",
JobType: "vacuum",
Summary: "payload summary",
Detail: "payload detail",
Parameters: map[string]*plugin_pb.ConfigValue{
"volume_id": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 9},
},
},
Labels: map[string]string{
"source": "detector",
},
}, 2)
pluginSvc.Shutdown()
job, found := pluginSvc.GetTrackedJob("job-payload")
if !found || job == nil {
t.Fatalf("expected tracked job")
}
if job.Detail != "" {
t.Fatalf("expected in-memory tracked job detail to be stripped, got=%q", job.Detail)
}
if job.Attempt != 2 {
t.Fatalf("unexpected attempt: %d", job.Attempt)
}
if len(job.Labels) != 0 {
t.Fatalf("expected in-memory labels to be stripped, got=%+v", job.Labels)
}
if len(job.Parameters) != 0 {
t.Fatalf("expected in-memory parameters to be stripped, got=%+v", job.Parameters)
}
detail, found, err := pluginSvc.BuildJobDetail("job-payload", 100, 0)
if err != nil {
t.Fatalf("BuildJobDetail: %v", err)
}
if !found || detail == nil || detail.Job == nil {
t.Fatalf("expected disk-backed job detail")
}
if detail.Job.Detail != "payload detail" {
t.Fatalf("unexpected disk-backed detail: %q", detail.Job.Detail)
}
if got := detail.Job.Labels["source"]; got != "detector" {
t.Fatalf("unexpected disk-backed label source: %q", got)
}
if got, ok := detail.Job.Parameters["volume_id"].(map[string]interface{}); !ok || got["int64_value"] != "9" {
t.Fatalf("unexpected disk-backed parameters payload: %#v", detail.Job.Parameters["volume_id"])
}
}
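
// TestTrackExecutionStartStoresErasureCodingExecutionPlan verifies that EC task
// parameters are expanded into an execution_plan with targets and shard assignments.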
func TestTrackExecutionStartStoresErasureCodingExecutionPlan(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{DataDir: t.TempDir()})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
taskParams := &worker_pb.TaskParams{
TaskId: "task-ec-1",
VolumeId: 29,
Collection: "photos",
Sources: []*worker_pb.TaskSource{
{
Node: "source-a:8080",
DataCenter: "dc1",
Rack: "rack1",
VolumeId: 29,
},
},
Targets: []*worker_pb.TaskTarget{
{
Node: "target-a:8080",
DataCenter: "dc1",
Rack: "rack2",
VolumeId: 29,
ShardIds: []uint32{0, 10},
},
{
Node: "target-b:8080",
DataCenter: "dc2",
Rack: "rack3",
VolumeId: 29,
ShardIds: []uint32{1, 11},
},
},
TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
DataShards: 10,
ParityShards: 4,
},
},
}
payload, err := proto.Marshal(taskParams)
if err != nil {
t.Fatalf("Marshal task params: %v", err)
}
pluginSvc.trackExecutionStart("req-ec-plan", "worker-ec", &plugin_pb.JobSpec{
JobId: "job-ec-plan",
JobType: "erasure_coding",
Parameters: map[string]*plugin_pb.ConfigValue{
"task_params_pb": {
Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: payload},
},
},
}, 1)
pluginSvc.Shutdown()
detail, found, err := pluginSvc.BuildJobDetail("job-ec-plan", 100, 0)
if err != nil {
t.Fatalf("BuildJobDetail: %v", err)
}
if !found || detail == nil || detail.Job == nil {
t.Fatalf("expected disk-backed detail")
}
rawPlan, ok := detail.Job.Parameters["execution_plan"]
if !ok {
t.Fatalf("expected execution_plan in parameters, got=%+v", detail.Job.Parameters)
}
plan, ok := rawPlan.(map[string]interface{})
if !ok {
t.Fatalf("unexpected execution_plan type: %T", rawPlan)
}
if plan["job_type"] != "erasure_coding" {
t.Fatalf("unexpected execution plan job type: %+v", plan["job_type"])
}
if plan["volume_id"] != float64(29) {
t.Fatalf("unexpected execution plan volume id: %+v", plan["volume_id"])
}
targets, ok := plan["targets"].([]interface{})
if !ok || len(targets) != 2 {
t.Fatalf("unexpected targets in execution plan: %+v", plan["targets"])
}
assignments, ok := plan["shard_assignments"].([]interface{})
if !ok || len(assignments) != 4 {
t.Fatalf("unexpected shard assignments in execution plan: %+v", plan["shard_assignments"])
}
firstAssignment, ok := assignments[0].(map[string]interface{})
if !ok {
t.Fatalf("unexpected first assignment payload: %+v", assignments[0])
}
if firstAssignment["shard_id"] != float64(0) || firstAssignment["kind"] != "data" {
t.Fatalf("unexpected first assignment: %+v", firstAssignment)
}
}
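
// TestBuildJobDetailIncludesActivitiesAndRunRecord verifies that the job detail
// includes the activity timeline, the run record, and result output values.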
func TestBuildJobDetailIncludesActivitiesAndRunRecord(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{DataDir: t.TempDir()})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
pluginSvc.trackExecutionStart("req-detail", "worker-z", &plugin_pb.JobSpec{
JobId: "job-detail",
JobType: "vacuum",
Summary: "detail summary",
}, 1)
pluginSvc.handleJobProgressUpdate("worker-z", &plugin_pb.JobProgressUpdate{
RequestId: "req-detail",
JobId: "job-detail",
JobType: "vacuum",
State: plugin_pb.JobState_JOB_STATE_RUNNING,
Stage: "scan",
Message: "scanning volume",
})
pluginSvc.handleJobCompleted(&plugin_pb.JobCompleted{
RequestId: "req-detail",
JobId: "job-detail",
JobType: "vacuum",
Success: true,
Result: &plugin_pb.JobResult{
Summary: "done",
OutputValues: map[string]*plugin_pb.ConfigValue{
"affected": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 1},
},
},
},
CompletedAt: timestamppb.Now(),
})
pluginSvc.Shutdown()
detail, found, err := pluginSvc.BuildJobDetail("job-detail", 100, 5)
if err != nil {
t.Fatalf("BuildJobDetail error: %v", err)
}
if !found || detail == nil {
t.Fatalf("expected job detail")
}
if detail.Job == nil || detail.Job.JobID != "job-detail" {
t.Fatalf("unexpected job detail payload: %+v", detail.Job)
}
if detail.RunRecord == nil || detail.RunRecord.JobID != "job-detail" {
t.Fatalf("expected run record for job-detail, got=%+v", detail.RunRecord)
}
if len(detail.Activities) == 0 {
t.Fatalf("expected activity timeline entries")
}
if detail.Job.ResultOutputValues == nil {
t.Fatalf("expected result output values")
}
}
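
// TestBuildJobDetailLoadsFromDiskWhenMemoryCleared verifies that job detail can be
// rebuilt from disk after the in-memory job and activity maps are cleared.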
func TestBuildJobDetailLoadsFromDiskWhenMemoryCleared(t *testing.T) {
t.Parallel()
pluginSvc, err := New(Options{DataDir: t.TempDir()})
if err != nil {
t.Fatalf("New: %v", err)
}
defer pluginSvc.Shutdown()
pluginSvc.trackExecutionStart("req-disk", "worker-d", &plugin_pb.JobSpec{
JobId: "job-disk",
JobType: "vacuum",
Summary: "disk summary",
Detail: "disk detail payload",
}, 1)
pluginSvc.Shutdown()
pluginSvc.jobsMu.Lock()
pluginSvc.jobs = map[string]*TrackedJob{}
pluginSvc.jobsMu.Unlock()
pluginSvc.activitiesMu.Lock()
pluginSvc.activities = nil
pluginSvc.activitiesMu.Unlock()
detail, found, err := pluginSvc.BuildJobDetail("job-disk", 100, 0)
if err != nil {
t.Fatalf("BuildJobDetail: %v", err)
}
if !found || detail == nil || detail.Job == nil {
t.Fatalf("expected detail from disk")
}
if detail.Job.Detail != "disk detail payload" {
t.Fatalf("unexpected disk detail payload: %q", detail.Job.Detail)
}
}