seaweedFS/test/erasure_coding/admin_dockertest/ec_integration_test.go
Chris Lu 8ec9ff4a12 Refactor plugin system and migrate worker runtime (#8369)
* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fall back to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric and color-only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
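
A minimal sketch of a debounced persistence flusher in the spirit of the change above, assuming writes are coalesced through one background goroutine; names and timings are illustrative, not the actual admin/plugin code:

package main

import (
	"fmt"
	"time"
)

type flusher struct {
	kick chan struct{}
}

// newFlusher starts a background goroutine that coalesces bursts of mark()
// calls into a single write after the debounce delay.
func newFlusher(delay time.Duration, write func()) *flusher {
	f := &flusher{kick: make(chan struct{}, 1)}
	go func() {
		for range f.kick {
			time.Sleep(delay) // wait out the burst; marks arriving now coalesce
			write()
		}
	}()
	return f
}

// mark schedules a flush; repeat calls within the window are absorbed by the
// one-slot buffer and become no-ops.
func (f *flusher) mark() {
	select {
	case f.kick <- struct{}{}:
	default:
	}
}

func main() {
	f := newFlusher(50*time.Millisecond, func() { fmt.Println("flushed") })
	for i := 0; i < 100; i++ {
		f.mark()
	}
	time.Sleep(300 * time.Millisecond) // one or two "flushed" lines, not 100
}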

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support
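
For context on why the pointer refactor helps: encoding/json's omitempty never treats a zero time.Time struct as empty, while a nil *time.Time is dropped. A small self-contained demonstration (the Job struct here is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type Job struct {
	CompletedValue time.Time  `json:"completed_value,omitempty"` // always emitted, even when zero
	CompletedPtr   *time.Time `json:"completed_ptr,omitempty"`   // dropped when nil
}

func main() {
	out, _ := json.Marshal(Job{})
	fmt.Println(string(out)) // {"completed_value":"0001-01-01T00:00:00Z"}
}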

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.
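
A minimal sketch of the seeded-PRNG payload generation described above; the function name and seed handling are assumptions, not the actual ec_stress_runner code:

package main

import (
	"fmt"
	"math/rand"
)

// makePayload fills a buffer from a deterministic PRNG, which is far cheaper
// than crypto/rand for multi-megabyte stress payloads and reproducible by seed.
func makePayload(seed int64, size int) []byte {
	rng := rand.New(rand.NewSource(seed))
	buf := make([]byte, size)
	rng.Read(buf) // math/rand Read never returns an error
	return buf
}

func main() {
	fmt.Println(len(makePayload(42, 5*1024*1024))) // 5242880
}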

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
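
A minimal sketch of the recover-based safe send and WaitGroup-synchronized shutdown described above; the generic signature is an assumption, not the actual admin/plugin helper:

package main

import (
	"fmt"
	"sync"
)

// safeSendCh sends v on ch and swallows the panic that a send on a closed
// channel raises, reporting success via the return value instead.
func safeSendCh[T any](ch chan T, v T) (sent bool) {
	defer func() {
		if recover() != nil {
			sent = false
		}
	}()
	ch <- v
	return true
}

func main() {
	var wg sync.WaitGroup
	ch := make(chan int, 1)
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println(safeSendCh(ch, 1)) // true
		close(ch)
		fmt.Println(safeSendCh(ch, 2)) // false: channel already closed
	}()
	wg.Wait() // Shutdown() analogue: wait for background goroutines
}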

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.
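
For illustration, nil-safe handling of *time.Time fields might look like the following; only the timeToPtr name comes from this log, the rest is assumed:

package main

import (
	"fmt"
	"time"
)

func timeToPtr(t time.Time) *time.Time { return &t }

// latest returns the later of two optional timestamps, treating nil as "unset".
func latest(a, b *time.Time) *time.Time {
	switch {
	case a == nil:
		return b
	case b == nil:
		return a
	case b.After(*a):
		return b
	default:
		return a
	}
}

func main() {
	now := timeToPtr(time.Now())
	fmt.Println(latest(nil, now).Equal(*now)) // true
}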

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
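
A sketch of shutdown detection via an explicit channel select instead of error string matching; the reserve/slots shape is illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errShuttingDown = errors.New("scheduler shutting down")

// reserve tries to take an execution slot, aborting cleanly when shutdownCh
// is closed; no brittle comparison of error message strings is involved.
func reserve(shutdownCh <-chan struct{}, slots chan struct{}) error {
	select {
	case <-shutdownCh: // explicit shutdown signal
		return errShuttingDown
	case slots <- struct{}{}: // reserved a slot
		return nil
	case <-time.After(time.Second):
		return errors.New("reserve timeout")
	}
}

func main() {
	shutdownCh := make(chan struct{})
	close(shutdownCh)
	fmt.Println(reserve(shutdownCh, make(chan struct{}))) // scheduler shutting down
}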

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
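
A minimal sketch of the write-temp-then-rename pattern with the parent-directory fix described above; details of the real atomicWriteFile may differ:

package main

import (
	"os"
	"path/filepath"
)

func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
	// ensure the parent directory exists before creating the temp file
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return err
	}
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	// rename is atomic on POSIX filesystems, so readers never see a partial file
	return os.Rename(tmp.Name(), path)
}

func main() {
	_ = atomicWriteFile("tmp/demo/config.json", []byte(`{"ok":true}`), 0o644)
}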

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Reworked plugin_monitor_test.go to handle asynchronous persistence robustly.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
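
A sketch of the "snapshot the channel under the lock, send outside it" fix described for safeSendDetectionComplete; the struct shape is illustrative:

package main

import (
	"fmt"
	"sync"
)

type runner struct {
	mu          sync.Mutex
	detectionCh chan string // may be swapped or nilled by other goroutines
}

func (r *runner) safeSendDetectionComplete(jobType string) {
	r.mu.Lock()
	ch := r.detectionCh // snapshot the channel while holding the lock
	r.mu.Unlock()
	if ch == nil {
		return
	}
	select {
	case ch <- jobType: // send without holding the mutex, avoiding lock-order issues
	default: // drop rather than block if nobody is listening
	}
}

func main() {
	r := &runner{detectionCh: make(chan string, 1)}
	r.safeSendDetectionComplete("erasure_coding")
	fmt.Println(<-r.detectionCh)
}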

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
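
For reference, the standard-library way to cap a request body at 1 MiB; parseProtoJSONBody itself is not shown and the handler below is illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

const maxBodyBytes = 1 << 20 // 1 MiB

func handler(w http.ResponseWriter, r *http.Request) {
	// MaxBytesReader makes any read past the limit fail instead of
	// letting a malicious client force an unbounded allocation
	r.Body = http.MaxBytesReader(w, r.Body, maxBodyBytes)
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
		return
	}
	fmt.Fprintf(w, "read %d bytes", len(body))
}

func main() {
	req := httptest.NewRequest("PUT", "/api/plugin/job-types/x/config", strings.NewReader("{}"))
	rec := httptest.NewRecorder()
	handler(rec, req)
	fmt.Println(rec.Body.String()) // read 2 bytes
}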

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID
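
A sketch of a package-level compiled pattern like validJobTypePattern; the exact character set here is an assumption:

package main

import (
	"fmt"
	"regexp"
)

// compiled once at package init instead of on every request
var validJobTypePattern = regexp.MustCompile(`^[a-z0-9_]+$`)

func main() {
	fmt.Println(validJobTypePattern.MatchString("erasure_coding")) // true
	fmt.Println(validJobTypePattern.MatchString("../escape"))      // false
}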

* admin/plugin: fix racy Shutdown channel close with sync.Once
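
A minimal sketch of the sync.Once guard described above; the Plugin fields are illustrative:

package main

import "sync"

type Plugin struct {
	shutdownOnce sync.Once
	shutdownCh   chan struct{}
	wg           sync.WaitGroup
}

func (p *Plugin) Shutdown() {
	// concurrent or repeated Shutdown calls close the channel exactly once,
	// so there is no "close of closed channel" panic
	p.shutdownOnce.Do(func() { close(p.shutdownCh) })
	p.wg.Wait()
}

func main() {
	p := &Plugin{shutdownCh: make(chan struct{})}
	p.Shutdown()
	p.Shutdown() // safe: the close runs only once
}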

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators
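
A sketch of a nil-safe OccurredAt comparator in the spirit of the activityLess helper; the JobActivity shape here is illustrative:

package main

import (
	"fmt"
	"sort"
	"time"
)

type JobActivity struct {
	OccurredAt *time.Time
}

// activityLess orders activities by OccurredAt, placing nil timestamps first.
func activityLess(a, b *JobActivity) bool {
	switch {
	case a.OccurredAt == nil:
		return b.OccurredAt != nil
	case b.OccurredAt == nil:
		return false
	default:
		return a.OccurredAt.Before(*b.OccurredAt)
	}
}

func main() {
	t1, t2 := time.Now(), time.Now().Add(time.Minute)
	acts := []*JobActivity{{&t2}, {nil}, {&t1}}
	sort.Slice(acts, func(i, j int) bool { return activityLess(acts[i], acts[j]) })
	fmt.Println(acts[0].OccurredAt == nil) // true: nil sorts first
}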

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2026-02-18 13:42:41 -08:00


package admin_dockertest

import (
	"bytes"
	crand "crypto/rand"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"testing"
	"time"
)

const (
	AdminUrl  = "http://localhost:23646"
	MasterUrl = "http://localhost:9333"
	FilerUrl  = "http://localhost:8888"
)

// Helper to run commands in background and track PIDs for cleanup
var runningCmds []*exec.Cmd

func cleanup() {
	for _, cmd := range runningCmds {
		if cmd.Process != nil {
			cmd.Process.Kill()
		}
	}
}

func startWeed(t *testing.T, name string, args ...string) *exec.Cmd {
	cmd := exec.Command("./weed_bin", args...)
	// Create logs dir in local ./tmp
	wd, _ := os.Getwd()
	logDir := filepath.Join(wd, "tmp", "logs")
	os.MkdirAll(logDir, 0755)
	logFile, err := os.Create(filepath.Join(logDir, name+".log"))
	if err != nil {
		t.Fatalf("Failed to create log file: %v", err)
	}
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	// Set Cwd to test directory so it finds local ./tmp
	cmd.Dir = wd
	// assume "weed_bin" binary is in project root.
	rootDir := filepath.Dir(filepath.Dir(filepath.Dir(wd)))
	cmd.Path = filepath.Join(rootDir, "weed_bin")
	err = cmd.Start()
	if err != nil {
		t.Fatalf("Failed to start weed %v: %v", args, err)
	}
	runningCmds = append(runningCmds, cmd)
	return cmd
}

func stopWeed(t *testing.T, cmd *exec.Cmd) {
	if cmd != nil && cmd.Process != nil {
		t.Logf("Stopping process %d", cmd.Process.Pid)
		cmd.Process.Kill()
		cmd.Wait()
		// Remove from runningCmds to avoid double kill in cleanup
		for i, c := range runningCmds {
			if c == cmd {
				runningCmds = append(runningCmds[:i], runningCmds[i+1:]...)
				break
			}
		}
	}
}

func ensureEnvironment(t *testing.T) {
	// 1. Build the weed binary
	wd, _ := os.Getwd()
	rootDir := filepath.Dir(filepath.Dir(filepath.Dir(wd))) // up 3 levels to the project root
	buildCmd := exec.Command("go", "build", "-o", "weed_bin", "./weed")
	buildCmd.Dir = rootDir
	buildCmd.Stdout = os.Stdout
	buildCmd.Stderr = os.Stderr
	if err := buildCmd.Run(); err != nil {
		t.Fatalf("Failed to build weed: %v", err)
	}
	t.Log("Successfully built weed binary")

	// 2. Start Master, using local ./tmp/master
	os.RemoveAll("tmp")
	err := os.MkdirAll(filepath.Join("tmp", "master"), 0755)
	if err != nil {
		t.Fatalf("Failed to create tmp dir: %v", err)
	}
	startWeed(t, "master", "master", "-mdir=./tmp/master", "-port=9333", "-ip=localhost", "-peers=none", "-volumeSizeLimitMB=100")
	// Wait for master
	waitForUrl(t, MasterUrl+"/cluster/status", 10)

	// 3. Start 14 volume servers, enough to verify the RS(10,4) default EC layout
	for i := 1; i <= 14; i++ {
		volName := fmt.Sprintf("volume%d", i)
		port := 8080 + i - 1
		dir := filepath.Join("tmp", volName)
		os.MkdirAll(dir, 0755)
		startWeed(t, volName, "volume", "-dir="+dir, "-mserver=localhost:9333", fmt.Sprintf("-port=%d", port), "-ip=localhost")
	}

	// 4. Start Filer
	os.MkdirAll(filepath.Join("tmp", "filer"), 0755)
	startWeed(t, "filer", "filer", "-defaultStoreDir=./tmp/filer", "-master=localhost:9333", "-port=8888", "-ip=localhost")
	waitForUrl(t, FilerUrl+"/", 60)

	// 5. Start maintenance workers, which execute the EC tasks
	for i := 1; i <= 2; i++ {
		workerName := fmt.Sprintf("worker%d", i)
		metricsPort := 9327 + i - 1
		debugPort := 6060 + i
		dir, _ := filepath.Abs(filepath.Join("tmp", workerName))
		os.MkdirAll(dir, 0755)
		startWeed(t, workerName, "worker", "-admin=localhost:23646", "-workingDir="+dir, fmt.Sprintf("-metricsPort=%d", metricsPort), fmt.Sprintf("-debug.port=%d", debugPort))
	}

	// 6. Start Admin
	os.RemoveAll(filepath.Join("tmp", "admin"))
	os.MkdirAll(filepath.Join("tmp", "admin"), 0755)
	startWeed(t, "admin", "admin", "-master=localhost:9333", "-port=23646", "-dataDir=./tmp/admin")
	waitForUrl(t, AdminUrl+"/health", 60)
	t.Log("Environment started successfully")
}

func waitForUrl(t *testing.T, url string, retries int) {
	for i := 0; i < retries; i++ {
		resp, err := http.Get(url)
		if err == nil {
			// close the body on every successful request, not only on 200,
			// so non-200 responses do not leak connections
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(1 * time.Second)
	}
	t.Fatalf("Timeout waiting for %s", url)
}

func TestEcEndToEnd(t *testing.T) {
	defer cleanup()
	ensureEnvironment(t)
	client := &http.Client{}

	// 1. Configure plugin job types for fast EC detection/execution.
	t.Log("Configuring plugin job types via API...")
	// Disable volume balance to reduce interference for this EC-focused test.
	balanceConfig := map[string]interface{}{
		"job_type": "volume_balance",
		"admin_runtime": map[string]interface{}{
			"enabled": false,
		},
	}
	jsonBody, err := json.Marshal(balanceConfig)
	if err != nil {
		t.Fatalf("Failed to marshal volume_balance config: %v", err)
	}
	req, err := http.NewRequest("PUT", AdminUrl+"/api/plugin/job-types/volume_balance/config", bytes.NewBuffer(jsonBody))
	if err != nil {
		t.Fatalf("Failed to create volume_balance config request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Failed to update volume_balance config: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		t.Fatalf("Failed to update volume_balance config (status %d): %s", resp.StatusCode, string(body))
	}
	resp.Body.Close()

	ecConfig := map[string]interface{}{
		"job_type": "erasure_coding",
		"admin_runtime": map[string]interface{}{
			"enabled":                          true,
			"detection_interval_seconds":       1,
			"global_execution_concurrency":     4,
			"per_worker_execution_concurrency": 4,
			"max_jobs_per_detection":           100,
		},
		"worker_config_values": map[string]interface{}{
			"quiet_for_seconds": map[string]interface{}{
				"int64_value": "1",
			},
			"min_interval_seconds": map[string]interface{}{
				"int64_value": "1",
			},
			"min_size_mb": map[string]interface{}{
				"int64_value": "1",
			},
			"fullness_ratio": map[string]interface{}{
				"double_value": 0.0001,
			},
		},
	}
	jsonBody, err = json.Marshal(ecConfig)
	if err != nil {
		t.Fatalf("Failed to marshal erasure_coding config: %v", err)
	}
	req, err = http.NewRequest("PUT", AdminUrl+"/api/plugin/job-types/erasure_coding/config", bytes.NewBuffer(jsonBody))
	if err != nil {
		t.Fatalf("Failed to create erasure_coding config request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err = client.Do(req)
	if err != nil {
		t.Fatalf("Failed to update erasure_coding config: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		t.Fatalf("Failed to update erasure_coding config (status %d): %s", resp.StatusCode, string(body))
	}
	resp.Body.Close()

	// 2. Upload a file
	fileSize := 5 * 1024 * 1024
	data := make([]byte, fileSize)
	if _, err := crand.Read(data); err != nil {
		t.Fatalf("Failed to generate random payload: %v", err)
	}
	fileName := fmt.Sprintf("ec_test_file_%d", time.Now().Unix())
	t.Logf("Uploading %d bytes file %s to Filer...", fileSize, fileName)
	uploadUrl := FilerUrl + "/" + fileName
	var uploadErr error
	for i := 0; i < 10; i++ {
		req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(data))
		if err != nil {
			uploadErr = err
			t.Logf("Upload attempt %d failed to create request: %v", i+1, err)
			time.Sleep(2 * time.Second)
			continue
		}
		resp, err := client.Do(req)
		if err == nil {
			if resp.StatusCode == 201 {
				resp.Body.Close()
				uploadErr = nil
				break
			}
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			uploadErr = fmt.Errorf("status %d: %s", resp.StatusCode, string(body))
		} else {
			uploadErr = err
		}
		t.Logf("Upload attempt %d failed: %v", i+1, uploadErr)
		time.Sleep(2 * time.Second)
	}
	if uploadErr != nil {
		t.Fatalf("Failed to upload file after retries: %v", uploadErr)
	}
	t.Log("Upload successful")

	// 3. Verify EC encoding
	t.Log("Waiting for EC encoding (checking Master topology)...")
	startTime := time.Now()
	ecVerified := false
	var lastBody []byte
	for time.Since(startTime) < 300*time.Second {
		// 3.1 Check Master topology
		resp, err := http.Get(MasterUrl + "/dir/status")
		if err == nil {
			lastBody, _ = io.ReadAll(resp.Body)
			resp.Body.Close()
			// Count total EC shards reported in the topology
			reShards := regexp.MustCompile(`"EcShards":\s*(\d+)`)
			matches := reShards.FindAllSubmatch(lastBody, -1)
			totalShards := 0
			for _, m := range matches {
				var count int
				fmt.Sscanf(string(m[1]), "%d", &count)
				totalShards += count
			}
			if totalShards > 0 {
				t.Logf("EC encoding verified (found %d total EcShards in topology) after %d seconds", totalShards, int(time.Since(startTime).Seconds()))
				ecVerified = true
				break
			}
		}
		// 3.2 Debug: check workers and jobs
		wResp, wErr := http.Get(AdminUrl + "/api/plugin/workers")
		workerCount := 0
		if wErr == nil {
			var workers []interface{}
			json.NewDecoder(wResp.Body).Decode(&workers)
			wResp.Body.Close()
			workerCount = len(workers)
		}
		tResp, tErr := http.Get(AdminUrl + "/api/plugin/jobs?limit=1000")
		taskCount := 0
		if tErr == nil {
			var tasks []interface{}
			json.NewDecoder(tResp.Body).Decode(&tasks)
			tResp.Body.Close()
			taskCount = len(tasks)
		}
		t.Logf("Waiting for EC... (Workers: %d, Active Tasks: %d)", workerCount, taskCount)
		time.Sleep(10 * time.Second)
	}
	if !ecVerified {
		dumpLogs(t)
		t.Fatalf("Timed out waiting for EC encoding to be verified in topology. Last body: %s", string(lastBody))
	}

	// 4. Verification: read back the file
	t.Log("Reading back file...")
	resp, err = http.Get(uploadUrl)
	if err != nil {
		dumpLogs(t)
		t.Fatalf("Failed to read back file: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		dumpLogs(t)
		t.Fatalf("Read back failed status: %d", resp.StatusCode)
	}
	content, _ := io.ReadAll(resp.Body)
	if len(content) != fileSize {
		dumpLogs(t)
		t.Fatalf("Read back size mismatch: got %d, want %d", len(content), fileSize)
	}
	// Verify byte-wise content equality
	if !bytes.Equal(content, data) {
		dumpLogs(t)
		t.Fatalf("Read back content mismatch: uploaded and downloaded data differ")
	}
	t.Log("Test PASS: EC encoding and read back successful!")
}

func dumpLogs(t *testing.T) {
	wd, _ := os.Getwd()
	logDir := filepath.Join(wd, "tmp", "logs")
	files, _ := os.ReadDir(logDir)
	for _, f := range files {
		if strings.HasSuffix(f.Name(), ".log") {
			content, _ := os.ReadFile(filepath.Join(logDir, f.Name()))
			t.Logf("--- LOG DUMP: %s ---\n%s\n--- END LOG ---", f.Name(), string(content))
		}
	}
}