Refactor plugin system and migrate worker runtime (#8369)

* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fallback to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric and color-only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Composed a robust plugin_monitor_test.go to handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID

* admin/plugin: fix racy Shutdown channel close with sync.Once

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
This commit is contained in:
Chris Lu
2026-02-18 13:42:41 -08:00
committed by GitHub
parent 5463038760
commit 8ec9ff4a12
82 changed files with 23419 additions and 11389 deletions

View File

@@ -212,8 +212,9 @@ To enable workers, add the following to your values.yaml:
worker: worker:
enabled: true enabled: true
replicas: 2 # Scale based on workload replicas: 2 # Scale based on workload
capabilities: "vacuum,balance,erasure_coding" # Tasks this worker can handle jobType: "vacuum,volume_balance,erasure_coding" # Job types this worker can handle
maxConcurrent: 3 # Maximum concurrent tasks per worker maxDetect: 1 # Maximum concurrent detection requests
maxExecute: 4 # Maximum concurrent execution jobs per worker
# Working directory for task execution # Working directory for task execution
# Default: "/tmp/seaweedfs-worker" # Default: "/tmp/seaweedfs-worker"
@@ -248,14 +249,14 @@ worker:
memory: "2Gi" memory: "2Gi"
``` ```
### Worker Capabilities ### Worker Job Types
Workers can be configured with different capabilities: Workers can be configured with different job types:
- **vacuum**: Reclaim deleted file space - **vacuum**: Reclaim deleted file space
- **balance**: Balance volumes across volume servers - **volume_balance**: Balance volumes across volume servers
- **erasure_coding**: Handle erasure coding operations - **erasure_coding**: Handle erasure coding operations
You can configure workers with all capabilities or create specialized worker pools with specific capabilities. You can configure workers with all job types or create specialized worker pools with specific job types.
### Worker Deployment Strategy ### Worker Deployment Strategy
@@ -264,11 +265,11 @@ For production deployments, consider:
1. **Multiple Workers**: Deploy 2+ worker replicas for high availability 1. **Multiple Workers**: Deploy 2+ worker replicas for high availability
2. **Resource Allocation**: Workers need sufficient CPU/memory for maintenance tasks 2. **Resource Allocation**: Workers need sufficient CPU/memory for maintenance tasks
3. **Storage**: Workers need temporary storage for vacuum and balance operations (size depends on volume size) 3. **Storage**: Workers need temporary storage for vacuum and balance operations (size depends on volume size)
4. **Specialized Workers**: Create separate worker deployments for different capabilities if needed 4. **Specialized Workers**: Create separate worker deployments for different job types if needed
Example specialized worker configuration: Example specialized worker configuration:
For specialized worker pools, deploy separate Helm releases with different capabilities: For specialized worker pools, deploy separate Helm releases with different job types:
**values-worker-vacuum.yaml** (for vacuum operations): **values-worker-vacuum.yaml** (for vacuum operations):
```yaml ```yaml
@@ -287,8 +288,8 @@ admin:
worker: worker:
enabled: true enabled: true
replicas: 2 replicas: 2
capabilities: "vacuum" jobType: "vacuum"
maxConcurrent: 2 maxExecute: 2
# REQUIRED: Point to the admin service of your main SeaweedFS release # REQUIRED: Point to the admin service of your main SeaweedFS release
# Replace <namespace> with the namespace where your main seaweedfs is deployed # Replace <namespace> with the namespace where your main seaweedfs is deployed
# Example: If deploying in namespace "production": # Example: If deploying in namespace "production":
@@ -313,8 +314,8 @@ admin:
worker: worker:
enabled: true enabled: true
replicas: 1 replicas: 1
capabilities: "balance" jobType: "volume_balance"
maxConcurrent: 1 maxExecute: 1
# REQUIRED: Point to the admin service of your main SeaweedFS release # REQUIRED: Point to the admin service of your main SeaweedFS release
# Replace <namespace> with the namespace where your main seaweedfs is deployed # Replace <namespace> with the namespace where your main seaweedfs is deployed
# Example: If deploying in namespace "production": # Example: If deploying in namespace "production":

View File

@@ -136,11 +136,15 @@ spec:
{{- else }} {{- else }}
-admin={{ template "seaweedfs.fullname" . }}-admin.{{ .Release.Namespace }}:{{ .Values.admin.port }}{{ if .Values.admin.grpcPort }}.{{ .Values.admin.grpcPort }}{{ end }} \ -admin={{ template "seaweedfs.fullname" . }}-admin.{{ .Release.Namespace }}:{{ .Values.admin.port }}{{ if .Values.admin.grpcPort }}.{{ .Values.admin.grpcPort }}{{ end }} \
{{- end }} {{- end }}
-capabilities={{ .Values.worker.capabilities }} \ -jobType={{ .Values.worker.jobType }} \
-maxConcurrent={{ .Values.worker.maxConcurrent }} \ -maxDetect={{ .Values.worker.maxDetect }} \
-workingDir={{ .Values.worker.workingDir }}{{- if or .Values.worker.metricsPort .Values.worker.extraArgs }} \{{ end }} -maxExecute={{ .Values.worker.maxExecute }} \
-workingDir={{ .Values.worker.workingDir }}{{- if or .Values.worker.metricsPort .Values.worker.metricsIp .Values.worker.extraArgs }} \{{ end }}
{{- if .Values.worker.metricsPort }} {{- if .Values.worker.metricsPort }}
-metricsPort={{ .Values.worker.metricsPort }}{{- if .Values.worker.extraArgs }} \{{ end }} -metricsPort={{ .Values.worker.metricsPort }}{{- if or .Values.worker.metricsIp .Values.worker.extraArgs }} \{{ end }}
{{- end }}
{{- if .Values.worker.metricsIp }}
-metricsIp={{ .Values.worker.metricsIp }}{{- if .Values.worker.extraArgs }} \{{ end }}
{{- end }} {{- end }}
{{- range $index, $arg := .Values.worker.extraArgs }} {{- range $index, $arg := .Values.worker.extraArgs }}
{{ $arg }}{{- if lt $index (sub (len $.Values.worker.extraArgs) 1) }} \{{ end }} {{ $arg }}{{- if lt $index (sub (len $.Values.worker.extraArgs) 1) }} \{{ end }}

View File

@@ -1270,17 +1270,20 @@ worker:
replicas: 1 replicas: 1
loggingOverrideLevel: null loggingOverrideLevel: null
metricsPort: 9327 metricsPort: 9327
metricsIp: "" # If empty, defaults to 0.0.0.0
# Admin server to connect to # Admin server to connect to
adminServer: "" adminServer: ""
# Worker capabilities - comma-separated list # Worker job types - comma-separated list
# Available: vacuum, balance, erasure_coding # Available: vacuum, volume_balance, erasure_coding
# Default: "vacuum,balance,erasure_coding" (all capabilities) jobType: "vacuum,volume_balance,erasure_coding"
capabilities: "vacuum,balance,erasure_coding"
# Maximum number of concurrent tasks # Maximum number of concurrent detection requests
maxConcurrent: 3 maxDetect: 1
# Maximum number of concurrent execution jobs
maxExecute: 4
# Working directory for task execution # Working directory for task execution
workingDir: "/tmp/seaweedfs-worker" workingDir: "/tmp/seaweedfs-worker"

View File

@@ -2,13 +2,11 @@ package admin_dockertest
import ( import (
"bytes" "bytes"
crand "crypto/rand"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand"
"net/http" "net/http"
"net/url"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
@@ -161,129 +159,95 @@ func TestEcEndToEnd(t *testing.T) {
client := &http.Client{} client := &http.Client{}
// 1. Configure Global Maintenance (Scan Interval = 1s) via API // 1. Configure plugin job types for fast EC detection/execution.
t.Log("Configuring Global Maintenance via API...") t.Log("Configuring plugin job types via API...")
// 1.1 Fetch current config // Disable volume balance to reduce interference for this EC-focused test.
req, _ := http.NewRequest("GET", AdminUrl+"/api/maintenance/config", nil) balanceConfig := map[string]interface{}{
"job_type": "volume_balance",
"admin_runtime": map[string]interface{}{
"enabled": false,
},
}
jsonBody, err := json.Marshal(balanceConfig)
if err != nil {
t.Fatalf("Failed to marshal volume_balance config: %v", err)
}
req, err := http.NewRequest("PUT", AdminUrl+"/api/plugin/job-types/volume_balance/config", bytes.NewBuffer(jsonBody))
if err != nil {
t.Fatalf("Failed to create volume_balance config request: %v", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req) resp, err := client.Do(req)
if err != nil { if err != nil {
t.Fatalf("Failed to get global config: %v", err) t.Fatalf("Failed to update volume_balance config: %v", err)
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body) body, _ := io.ReadAll(resp.Body)
t.Fatalf("Failed to get global config (status %d): %s", resp.StatusCode, string(body)) t.Fatalf("Failed to update volume_balance config (status %d): %s", resp.StatusCode, string(body))
}
var globalConfig map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&globalConfig); err != nil {
t.Fatalf("Failed to decode global config: %v", err)
} }
resp.Body.Close() resp.Body.Close()
// 1.2 Modify config ecConfig := map[string]interface{}{
globalConfig["enabled"] = true "job_type": "erasure_coding",
globalConfig["scan_interval_seconds"] = 1 "admin_runtime": map[string]interface{}{
"enabled": true,
// Ensure policy structure exists "detection_interval_seconds": 1,
if globalConfig["policy"] == nil { "global_execution_concurrency": 4,
globalConfig["policy"] = map[string]interface{}{} "per_worker_execution_concurrency": 4,
"max_jobs_per_detection": 100,
},
"worker_config_values": map[string]interface{}{
"quiet_for_seconds": map[string]interface{}{
"int64_value": "1",
},
"min_interval_seconds": map[string]interface{}{
"int64_value": "1",
},
"min_size_mb": map[string]interface{}{
"int64_value": "1",
},
"fullness_ratio": map[string]interface{}{
"double_value": 0.0001,
},
},
} }
policy, _ := globalConfig["policy"].(map[string]interface{}) jsonBody, err = json.Marshal(ecConfig)
if err != nil {
// Ensure task_policies structure exists t.Fatalf("Failed to marshal erasure_coding config: %v", err)
if policy["task_policies"] == nil {
policy["task_policies"] = map[string]interface{}{}
} }
taskPolicies, _ := policy["task_policies"].(map[string]interface{}) req, err = http.NewRequest("PUT", AdminUrl+"/api/plugin/job-types/erasure_coding/config", bytes.NewBuffer(jsonBody))
if err != nil {
// Disable balance tasks to avoid interference with EC test t.Fatalf("Failed to create erasure_coding config request: %v", err)
if taskPolicies["balance"] == nil {
taskPolicies["balance"] = map[string]interface{}{}
} }
balancePolicy, _ := taskPolicies["balance"].(map[string]interface{})
balancePolicy["enabled"] = false
// Set global max concurrent
policy["global_max_concurrent"] = 4
globalConfig["policy"] = policy
// Explicitly set required fields
requiredFields := map[string]float64{
"worker_timeout_seconds": 300,
"task_timeout_seconds": 7200,
"retry_delay_seconds": 900,
"cleanup_interval_seconds": 86400,
"task_retention_seconds": 604800,
"max_retries": 3,
}
for field, val := range requiredFields {
if _, ok := globalConfig[field]; !ok || globalConfig[field] == 0 {
globalConfig[field] = val
}
}
// 1.3 Update config
jsonBody, _ := json.Marshal(globalConfig)
req, _ = http.NewRequest("PUT", AdminUrl+"/api/maintenance/config", bytes.NewBuffer(jsonBody))
req.Header.Set("Content-Type", "application/json") req.Header.Set("Content-Type", "application/json")
resp, err = client.Do(req) resp, err = client.Do(req)
if err != nil { if err != nil {
t.Fatalf("Failed to update global config: %v", err) t.Fatalf("Failed to update erasure_coding config: %v", err)
} }
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body) body, _ := io.ReadAll(resp.Body)
t.Fatalf("Failed to update global config (status %d): %s", resp.StatusCode, string(body)) t.Fatalf("Failed to update erasure_coding config (status %d): %s", resp.StatusCode, string(body))
} }
resp.Body.Close() resp.Body.Close()
// 2. Configure EC Task (Short intervals) via Form API // 2. Upload a file
t.Log("Configuring EC Task via Form API...")
formData := url.Values{}
formData.Set("enabled", "true")
formData.Set("scan_interval_seconds", "1")
formData.Set("repeat_interval_seconds", "1")
formData.Set("check_interval_seconds", "1")
formData.Set("max_concurrent", "4")
formData.Set("quiet_for_seconds_value", "1")
formData.Set("quiet_for_seconds_unit", "seconds")
formData.Set("min_size_mb", "1")
formData.Set("fullness_ratio", "0.0001")
req, _ = http.NewRequest("POST", AdminUrl+"/maintenance/config/erasure_coding", strings.NewReader(formData.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err = client.Do(req)
if err != nil {
t.Fatalf("Failed to update EC config: %v", err)
}
if resp.StatusCode != 200 && resp.StatusCode != 303 {
body, _ := io.ReadAll(resp.Body)
t.Fatalf("Failed to update EC config (status %d): %s", resp.StatusCode, string(body))
}
resp.Body.Close()
t.Log("EC Task Configuration updated")
// 3. Restart Admin to pick up Global Config (Scan Interval)
if len(runningCmds) > 0 {
adminCmd := runningCmds[len(runningCmds)-1]
t.Log("Restarting Admin Server to apply configuration...")
stopWeed(t, adminCmd)
time.Sleep(10 * time.Second)
startWeed(t, "admin_restarted", "admin", "-master=localhost:9333", "-port=23646", "-port.grpc=33646", "-dataDir=./tmp/admin")
waitForUrl(t, AdminUrl+"/health", 60)
}
// 4. Upload a file
fileSize := 5 * 1024 * 1024 fileSize := 5 * 1024 * 1024
data := make([]byte, fileSize) data := make([]byte, fileSize)
rand.Read(data) crand.Read(data)
fileName := fmt.Sprintf("ec_test_file_%d", time.Now().Unix()) fileName := fmt.Sprintf("ec_test_file_%d", time.Now().Unix())
t.Logf("Uploading %d bytes file %s to Filer...", fileSize, fileName) t.Logf("Uploading %d bytes file %s to Filer...", fileSize, fileName)
uploadUrl := FilerUrl + "/" + fileName uploadUrl := FilerUrl + "/" + fileName
var uploadErr error var uploadErr error
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
req, _ := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(data)) req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(data))
if err != nil {
uploadErr = err
t.Logf("Upload attempt %d failed to create request: %v", i+1, err)
time.Sleep(2 * time.Second)
continue
}
resp, err := client.Do(req) resp, err := client.Do(req)
if err == nil { if err == nil {
if resp.StatusCode == 201 { if resp.StatusCode == 201 {
@@ -306,17 +270,17 @@ func TestEcEndToEnd(t *testing.T) {
} }
t.Log("Upload successful") t.Log("Upload successful")
// 5. Verify EC Encoding // 3. Verify EC Encoding
t.Log("Waiting for EC encoding (checking Master topology)...") t.Log("Waiting for EC encoding (checking Master topology)...")
startTime := time.Now() startTime := time.Now()
ecVerified := false ecVerified := false
var lastBody []byte var lastBody []byte
for time.Since(startTime) < 300*time.Second { for time.Since(startTime) < 300*time.Second {
// 5.1 Check Master Topology // 3.1 Check Master Topology
resp, err := http.Get(MasterUrl + "/dir/status") resp, err := http.Get(MasterUrl + "/dir/status")
if err == nil { if err == nil {
lastBody, _ = ioutil.ReadAll(resp.Body) lastBody, _ = io.ReadAll(resp.Body)
resp.Body.Close() resp.Body.Close()
// Check total EC shards // Check total EC shards
@@ -336,8 +300,8 @@ func TestEcEndToEnd(t *testing.T) {
} }
} }
// 5.2 Debug: Check workers and tasks // 3.2 Debug: Check workers and jobs
wResp, wErr := http.Get(AdminUrl + "/api/maintenance/workers") wResp, wErr := http.Get(AdminUrl + "/api/plugin/workers")
workerCount := 0 workerCount := 0
if wErr == nil { if wErr == nil {
var workers []interface{} var workers []interface{}
@@ -346,7 +310,7 @@ func TestEcEndToEnd(t *testing.T) {
workerCount = len(workers) workerCount = len(workers)
} }
tResp, tErr := http.Get(AdminUrl + "/api/maintenance/tasks") tResp, tErr := http.Get(AdminUrl + "/api/plugin/jobs?limit=1000")
taskCount := 0 taskCount := 0
if tErr == nil { if tErr == nil {
var tasks []interface{} var tasks []interface{}

View File

@@ -0,0 +1,515 @@
package main
import (
"context"
"errors"
"flag"
"fmt"
"io"
"log"
mrand "math/rand"
"net/http"
"net/url"
"os/signal"
"path"
"sort"
"strings"
"sync"
"syscall"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// config captures the command-line configuration for the EC stress runner.
// Each field is backed by a flag registered in loadConfig; see the flag
// help strings there for authoritative descriptions.
type config struct {
MasterAddresses []string // master server addresses (comma-separated "-masters" flag)
FilerURL string // filer base URL ("-filer")
PathPrefix string // filer path prefix for generated files ("-path_prefix")
Collection string // target collection for stress data ("-collection")
FileSizeBytes int64 // size of each generated file, derived from "-file_size_mb"
BatchSize int // files generated per write cycle ("-batch_size")
WriteInterval time.Duration // delay between write cycles ("-write_interval")
CleanupInterval time.Duration // delay between EC cleanup cycles ("-cleanup_interval")
EcMinAge time.Duration // minimum observed EC age before deletion; 0 disables the age guard ("-ec_min_age")
MaxCleanupPerCycle int // cap on EC volumes deleted per cleanup cycle ("-max_cleanup_per_cycle")
RequestTimeout time.Duration // HTTP/gRPC request timeout ("-request_timeout")
MaxRuntime time.Duration // total run duration; 0 means run until interrupted ("-max_runtime")
DryRun bool // when true, log actions without deleting EC shards ("-dry_run")
}
// runner holds the long-lived state of the stress run: the immutable
// configuration, shared clients, and bookkeeping for EC cleanup.
type runner struct {
cfg config // flag-derived configuration; not mutated after construction in main
httpClient *http.Client // shared client, constructed with cfg.RequestTimeout in main
grpcDialOption grpc.DialOption // insecure transport credentials for gRPC calls (set in main)
mu sync.Mutex // NOTE(review): presumably guards the mutable fields below (sequence, ecFirstSeenAt, rng) — confirm against the loop methods, which are outside this chunk
sequence int64 // monotonically used counter? usage not visible in this chunk — verify in run/write path
ecFirstSeenAt map[uint32]time.Time // EC volume id -> when this runner first observed it as EC; feeds the EcMinAge age guard
rng *mrand.Rand // seeded math/rand PRNG (seeded with wall clock in main)
}
// ecVolumeInfo describes an erasure-coded volume's placement.
type ecVolumeInfo struct {
Collection string // collection the EC volume belongs to
NodeShards map[pb.ServerAddress][]uint32 // volume-server address -> ids hosted there (shard ids, per the field name — confirm in the topology reader)
}
// ecCleanupCandidate is an EC volume considered for deletion during a
// cleanup cycle.
type ecCleanupCandidate struct {
VolumeID uint32 // EC volume id
FirstSeenAt time.Time // first observation time (see runner.ecFirstSeenAt); compared against cfg.EcMinAge
Info *ecVolumeInfo // shard placement used to address the deletion RPCs
}
// main parses flags, installs signal-driven cancellation, constructs the
// runner, and blocks until the run finishes or is interrupted. A run that
// ends via cancellation or deadline is treated as a clean stop.
func main() {
	conf, err := loadConfig()
	if err != nil {
		log.Fatalf("invalid flags: %v", err)
	}

	// Cancel on SIGINT/SIGTERM; additionally bound the total runtime when
	// -max_runtime is set.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	if conf.MaxRuntime > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, conf.MaxRuntime)
		defer cancel()
	}

	stress := &runner{
		cfg:            conf,
		httpClient:     &http.Client{Timeout: conf.RequestTimeout},
		grpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		ecFirstSeenAt:  map[uint32]time.Time{},
		rng:            mrand.New(mrand.NewSource(time.Now().UnixNano())),
	}

	log.Printf(
		"starting EC stress runner: masters=%s filer=%s prefix=%s collection=%s file_size=%d batch=%d write_interval=%s cleanup_interval=%s ec_min_age=%s max_cleanup=%d dry_run=%v",
		strings.Join(conf.MasterAddresses, ","),
		conf.FilerURL,
		conf.PathPrefix,
		conf.Collection,
		conf.FileSizeBytes,
		conf.BatchSize,
		conf.WriteInterval,
		conf.CleanupInterval,
		conf.EcMinAge,
		conf.MaxCleanupPerCycle,
		conf.DryRun,
	)

	err = stress.run(ctx)
	switch {
	case err == nil, errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded):
		// Normal termination paths: finished, interrupted, or timed out.
		log.Printf("runner stopped")
	default:
		log.Fatalf("runner stopped with error: %v", err)
	}
}
// loadConfig registers and parses all command-line flags, then validates and
// normalizes them into a config. On a validation failure it returns the
// partially populated config together with a descriptive error.
func loadConfig() (config, error) {
	var masters string
	cfg := config{}
	flag.StringVar(&masters, "masters", "127.0.0.1:9333", "comma-separated master server addresses")
	flag.StringVar(&cfg.FilerURL, "filer", "http://127.0.0.1:8888", "filer base URL")
	flag.StringVar(&cfg.PathPrefix, "path_prefix", "/tmp/ec-stress", "filer path prefix for generated files")
	flag.StringVar(&cfg.Collection, "collection", "ec_stress", "target collection for stress data")
	fileSizeMB := flag.Int("file_size_mb", 8, "size per generated file in MB")
	flag.IntVar(&cfg.BatchSize, "batch_size", 4, "files generated per write cycle")
	flag.DurationVar(&cfg.WriteInterval, "write_interval", 5*time.Second, "interval between write cycles")
	flag.DurationVar(&cfg.CleanupInterval, "cleanup_interval", 2*time.Minute, "interval between EC cleanup cycles")
	flag.DurationVar(&cfg.EcMinAge, "ec_min_age", 30*time.Minute, "minimum observed EC age before deletion")
	flag.IntVar(&cfg.MaxCleanupPerCycle, "max_cleanup_per_cycle", 4, "maximum EC volumes deleted per cleanup cycle")
	flag.DurationVar(&cfg.RequestTimeout, "request_timeout", 20*time.Second, "HTTP/gRPC request timeout")
	flag.DurationVar(&cfg.MaxRuntime, "max_runtime", 0, "maximum run duration; 0 means run until interrupted")
	flag.BoolVar(&cfg.DryRun, "dry_run", false, "log actions without deleting EC shards")
	flag.Parse()
	// Derived values: split the master list and convert MB to bytes.
	cfg.MasterAddresses = splitNonEmpty(masters)
	cfg.FileSizeBytes = int64(*fileSizeMB) * 1024 * 1024
	if len(cfg.MasterAddresses) == 0 {
		return cfg, fmt.Errorf("at least one master is required")
	}
	if cfg.FileSizeBytes <= 0 {
		return cfg, fmt.Errorf("file_size_mb must be positive")
	}
	if cfg.BatchSize <= 0 {
		return cfg, fmt.Errorf("batch_size must be positive")
	}
	if cfg.WriteInterval <= 0 {
		return cfg, fmt.Errorf("write_interval must be positive")
	}
	if cfg.CleanupInterval <= 0 {
		return cfg, fmt.Errorf("cleanup_interval must be positive")
	}
	if cfg.EcMinAge < 0 {
		return cfg, fmt.Errorf("ec_min_age must be zero or positive")
	}
	// Note: EcMinAge == 0 intentionally disables the age guard, making EC volumes eligible for cleanup immediately.
	if cfg.MaxCleanupPerCycle <= 0 {
		return cfg, fmt.Errorf("max_cleanup_per_cycle must be positive")
	}
	if cfg.RequestTimeout <= 0 {
		return cfg, fmt.Errorf("request_timeout must be positive")
	}
	// Normalize user-supplied path and URL before the final URL validation.
	cfg.PathPrefix = ensureLeadingSlash(strings.TrimSpace(cfg.PathPrefix))
	cfg.Collection = strings.TrimSpace(cfg.Collection)
	cfg.FilerURL = strings.TrimRight(strings.TrimSpace(cfg.FilerURL), "/")
	if cfg.FilerURL == "" {
		return cfg, fmt.Errorf("filer URL is required")
	}
	if _, err := url.ParseRequestURI(cfg.FilerURL); err != nil {
		return cfg, fmt.Errorf("invalid filer URL %q: %w", cfg.FilerURL, err)
	}
	return cfg, nil
}
// run performs one immediate write and cleanup pass, then alternates on the
// configured tickers until ctx is cancelled or its deadline expires.
func (r *runner) run(ctx context.Context) error {
	writes := time.NewTicker(r.cfg.WriteInterval)
	defer writes.Stop()
	cleanups := time.NewTicker(r.cfg.CleanupInterval)
	defer cleanups.Stop()

	// Kick off an initial pass so the runner does useful work right away.
	r.runWriteCycle(ctx)
	r.runCleanupCycle(ctx)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-writes.C:
			r.runWriteCycle(ctx)
		case <-cleanups.C:
			r.runCleanupCycle(ctx)
		}
	}
}
// runWriteCycle uploads cfg.BatchSize files, bailing out early when ctx is
// cancelled; individual upload failures are logged and do not stop the batch.
func (r *runner) runWriteCycle(ctx context.Context) {
	for uploaded := 0; uploaded < r.cfg.BatchSize; uploaded++ {
		if ctx.Err() != nil {
			return
		}
		if err := r.uploadOneFile(ctx); err != nil {
			log.Printf("upload failed: %v", err)
		}
	}
}
func (r *runner) uploadOneFile(ctx context.Context) error {
sequence := r.nextSequence()
filePath := path.Join(r.cfg.PathPrefix, fmt.Sprintf("ec-stress-%d-%d.bin", time.Now().UnixNano(), sequence))
fileURL := r.cfg.FilerURL + filePath
if r.cfg.Collection != "" {
fileURL += "?collection=" + url.QueryEscape(r.cfg.Collection)
}
uploadCtx, cancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
defer cancel()
body := io.LimitReader(r.rng, r.cfg.FileSizeBytes)
request, err := http.NewRequestWithContext(uploadCtx, http.MethodPut, fileURL, body)
if err != nil {
return err
}
request.ContentLength = r.cfg.FileSizeBytes
request.Header.Set("Content-Type", "application/octet-stream")
response, err := r.httpClient.Do(request)
if err != nil {
return err
}
defer response.Body.Close()
io.Copy(io.Discard, response.Body)
if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusMultipleChoices {
return fmt.Errorf("upload %s returned %s", filePath, response.Status)
}
log.Printf("uploaded %s size=%d", filePath, r.cfg.FileSizeBytes)
return nil
}
// runCleanupCycle fetches the cluster topology, selects EC volumes that have
// been observed for at least EcMinAge, and deletes up to MaxCleanupPerCycle
// of them. In dry-run mode it only logs what it would delete. Per-volume
// errors are logged and skipped; the cycle never aborts the runner.
func (r *runner) runCleanupCycle(ctx context.Context) {
	volumeList, err := r.fetchVolumeList(ctx)
	if err != nil {
		log.Printf("cleanup skipped: fetch volume list failed: %v", err)
		return
	}
	if volumeList == nil || volumeList.TopologyInfo == nil {
		log.Printf("cleanup skipped: topology is empty")
		return
	}
	ecVolumes := collectEcVolumes(volumeList.TopologyInfo, r.cfg.Collection)
	// candidates is already age-filtered, sorted oldest-first, and capped
	// at MaxCleanupPerCycle by selectCleanupCandidates.
	candidates := r.selectCleanupCandidates(ecVolumes)
	if len(candidates) == 0 {
		log.Printf("cleanup: no EC volume candidate aged >= %s in collection=%q", r.cfg.EcMinAge, r.cfg.Collection)
		return
	}
	log.Printf("cleanup: deleting up to %d EC volumes (found=%d)", r.cfg.MaxCleanupPerCycle, len(candidates))
	deleted := 0
	for _, candidate := range candidates {
		if ctx.Err() != nil {
			return
		}
		if r.cfg.DryRun {
			log.Printf(
				"cleanup dry-run: would delete EC volume=%d collection=%q seen_for=%s nodes=%d",
				candidate.VolumeID,
				candidate.Info.Collection,
				time.Since(candidate.FirstSeenAt).Round(time.Second),
				len(candidate.Info.NodeShards),
			)
			continue
		}
		if err := r.deleteEcVolume(ctx, candidate.VolumeID, candidate.Info); err != nil {
			log.Printf("cleanup volume=%d failed: %v", candidate.VolumeID, err)
			continue
		}
		deleted++
		// Forget the deleted volume so a reappearing id gets a fresh
		// first-seen timestamp.
		r.mu.Lock()
		delete(r.ecFirstSeenAt, candidate.VolumeID)
		r.mu.Unlock()
		log.Printf("cleanup volume=%d completed", candidate.VolumeID)
	}
	log.Printf("cleanup finished: deleted=%d attempted=%d", deleted, len(candidates))
}
// fetchVolumeList asks each configured master in turn for the cluster volume
// list, returning the first successful response or the last error observed.
func (r *runner) fetchVolumeList(ctx context.Context) (*master_pb.VolumeListResponse, error) {
	var lastErr error
	for _, candidate := range r.cfg.MasterAddresses {
		address := strings.TrimSpace(candidate)
		if address == "" {
			continue
		}
		var result *master_pb.VolumeListResponse
		err := pb.WithMasterClient(false, pb.ServerAddress(address), r.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
			callCtx, cancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
			defer cancel()
			response, callErr := client.VolumeList(callCtx, &master_pb.VolumeListRequest{})
			if callErr != nil {
				return callErr
			}
			result = response
			return nil
		})
		if err == nil {
			return result, nil
		}
		lastErr = err
	}
	if lastErr == nil {
		lastErr = fmt.Errorf("no valid master address")
	}
	return nil, lastErr
}
// collectEcVolumes walks the master topology (data center -> rack -> data
// node -> disk) and aggregates EC shard locations per volume id. When
// collection is non-empty, only shards of that collection are included.
// Shard ids per server are deduplicated via a set and returned sorted.
func collectEcVolumes(topology *master_pb.TopologyInfo, collection string) map[uint32]*ecVolumeInfo {
	normalizedCollection := strings.TrimSpace(collection)
	// volume id -> server -> set of shard ids seen on that server
	volumeShardSets := make(map[uint32]map[pb.ServerAddress]map[uint32]struct{})
	volumeCollection := make(map[uint32]string)
	for _, dc := range topology.GetDataCenterInfos() {
		for _, rack := range dc.GetRackInfos() {
			for _, node := range rack.GetDataNodeInfos() {
				server := pb.NewServerAddressFromDataNode(node)
				for _, disk := range node.GetDiskInfos() {
					for _, shardInfo := range disk.GetEcShardInfos() {
						// Skip nil entries and volume id 0 (treated as invalid here).
						if shardInfo == nil || shardInfo.Id == 0 {
							continue
						}
						if normalizedCollection != "" && strings.TrimSpace(shardInfo.Collection) != normalizedCollection {
							continue
						}
						shards := erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo).IdsUint32()
						if len(shards) == 0 {
							continue
						}
						// Lazily create the per-volume and per-node sets.
						perVolume := volumeShardSets[shardInfo.Id]
						if perVolume == nil {
							perVolume = make(map[pb.ServerAddress]map[uint32]struct{})
							volumeShardSets[shardInfo.Id] = perVolume
						}
						perNode := perVolume[server]
						if perNode == nil {
							perNode = make(map[uint32]struct{})
							perVolume[server] = perNode
						}
						for _, shardID := range shards {
							perNode[shardID] = struct{}{}
						}
						// Last writer wins; shards of one volume share a collection.
						volumeCollection[shardInfo.Id] = shardInfo.Collection
					}
				}
			}
		}
	}
	// Flatten the sets into sorted slices for deterministic output.
	result := make(map[uint32]*ecVolumeInfo, len(volumeShardSets))
	for volumeID, perNode := range volumeShardSets {
		info := &ecVolumeInfo{
			Collection: volumeCollection[volumeID],
			NodeShards: make(map[pb.ServerAddress][]uint32, len(perNode)),
		}
		for server, shardSet := range perNode {
			shardIDs := make([]uint32, 0, len(shardSet))
			for shardID := range shardSet {
				shardIDs = append(shardIDs, shardID)
			}
			sort.Slice(shardIDs, func(i, j int) bool { return shardIDs[i] < shardIDs[j] })
			info.NodeShards[server] = shardIDs
		}
		result[volumeID] = info
	}
	return result
}
// selectCleanupCandidates reconciles the first-seen map with the volumes
// currently visible in topology and returns up to MaxCleanupPerCycle volumes
// that have been observed for at least EcMinAge, oldest first (volume id
// breaks ties).
func (r *runner) selectCleanupCandidates(ecVolumes map[uint32]*ecVolumeInfo) []ecCleanupCandidate {
	now := time.Now()
	r.mu.Lock()
	defer r.mu.Unlock()

	// Record newly observed volumes ...
	for id := range ecVolumes {
		if _, seen := r.ecFirstSeenAt[id]; !seen {
			r.ecFirstSeenAt[id] = now
		}
	}
	// ... and forget volumes that have disappeared from topology.
	for id := range r.ecFirstSeenAt {
		if _, present := ecVolumes[id]; !present {
			delete(r.ecFirstSeenAt, id)
		}
	}

	candidates := make([]ecCleanupCandidate, 0, len(ecVolumes))
	for id, info := range ecVolumes {
		seenAt := r.ecFirstSeenAt[id]
		// EcMinAge == 0 disables the age guard entirely.
		if r.cfg.EcMinAge > 0 && now.Sub(seenAt) < r.cfg.EcMinAge {
			continue
		}
		candidates = append(candidates, ecCleanupCandidate{
			VolumeID:    id,
			FirstSeenAt: seenAt,
			Info:        info,
		})
	}

	sort.Slice(candidates, func(i, j int) bool {
		if candidates[i].FirstSeenAt.Equal(candidates[j].FirstSeenAt) {
			return candidates[i].VolumeID < candidates[j].VolumeID
		}
		return candidates[i].FirstSeenAt.Before(candidates[j].FirstSeenAt)
	})
	if len(candidates) > r.cfg.MaxCleanupPerCycle {
		candidates = candidates[:r.cfg.MaxCleanupPerCycle]
	}
	return candidates
}
// deleteEcVolume removes one EC volume from every server holding its shards.
// Per server it unmounts the shards (best effort), deletes the shard files
// (failure aborts that server), then issues a final volume delete (best
// effort). It returns an error when some or all per-server passes failed.
func (r *runner) deleteEcVolume(ctx context.Context, volumeID uint32, info *ecVolumeInfo) error {
	if info == nil {
		return fmt.Errorf("ec volume %d has no topology info", volumeID)
	}
	failureCount := 0
	for server, shardIDs := range info.NodeShards {
		err := pb.WithVolumeServerClient(false, server, r.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
			// Unmount first so the shard files can be deleted. An unmount
			// failure is logged but does not stop the deletion attempt.
			// Note: all deferred cancels below fire when this per-server
			// closure returns, not per call.
			unmountCtx, unmountCancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
			defer unmountCancel()
			if _, err := client.VolumeEcShardsUnmount(unmountCtx, &volume_server_pb.VolumeEcShardsUnmountRequest{
				VolumeId: volumeID,
				ShardIds: shardIDs,
			}); err != nil {
				log.Printf("volume %d ec shards unmount on %s failed: %v", volumeID, server, err)
			}
			// Deleting the shard files is the step that must succeed.
			if len(shardIDs) > 0 {
				deleteCtx, deleteCancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
				defer deleteCancel()
				if _, err := client.VolumeEcShardsDelete(deleteCtx, &volume_server_pb.VolumeEcShardsDeleteRequest{
					VolumeId:   volumeID,
					Collection: r.cfg.Collection,
					ShardIds:   shardIDs,
				}); err != nil {
					return err
				}
			}
			// Final volume delete is best effort; leftovers are only logged.
			finalDeleteCtx, finalDeleteCancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
			defer finalDeleteCancel()
			if _, err := client.VolumeDelete(finalDeleteCtx, &volume_server_pb.VolumeDeleteRequest{
				VolumeId: volumeID,
			}); err != nil {
				log.Printf("volume %d delete on %s failed: %v", volumeID, server, err)
			}
			return nil
		})
		if err != nil {
			failureCount++
			log.Printf("cleanup volume=%d server=%s shards=%v failed: %v", volumeID, server, shardIDs, err)
		}
	}
	// Distinguish total failure from partial failure for the caller's log.
	if failureCount == len(info.NodeShards) && failureCount > 0 {
		return fmt.Errorf("all shard deletions failed for volume %d", volumeID)
	}
	if failureCount > 0 {
		return fmt.Errorf("partial shard deletion failure for volume %d", volumeID)
	}
	return nil
}
// nextSequence increments and returns the upload counter under the lock.
func (r *runner) nextSequence() int64 {
	r.mu.Lock()
	next := r.sequence + 1
	r.sequence = next
	r.mu.Unlock()
	return next
}
// splitNonEmpty splits a comma-separated list, trims whitespace from each
// entry, and drops entries that end up empty.
func splitNonEmpty(value string) []string {
	fields := strings.Split(value, ",")
	cleaned := make([]string, 0, len(fields))
	for _, field := range fields {
		if entry := strings.TrimSpace(field); entry != "" {
			cleaned = append(cleaned, entry)
		}
	}
	return cleaned
}
// ensureLeadingSlash returns value prefixed with "/" unless it already has
// one; an empty value becomes "/".
func ensureLeadingSlash(value string) string {
	switch {
	case value == "":
		return "/"
	case strings.HasPrefix(value, "/"):
		return value
	default:
		return "/" + value
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,735 @@
package dash
import (
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/admin/plugin"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// Timeout policy for the plugin workflow endpoints: requests may supply
// timeout_seconds, which normalizeTimeout falls back to the default for
// non-positive values and clamps to the matching maximum.
const (
	defaultPluginDetectionTimeout = 45 * time.Second
	defaultPluginExecutionTimeout = 90 * time.Second
	maxPluginDetectionTimeout     = 5 * time.Minute
	maxPluginExecutionTimeout     = 10 * time.Minute
	defaultPluginRunTimeout       = 5 * time.Minute
	maxPluginRunTimeout           = 30 * time.Minute
)
// GetPluginStatusAPI returns plugin status. When the plugin runtime is not
// wired it reports enabled=false plus the worker gRPC port; otherwise it
// adds configuration state, base directory, and connected worker count.
func (s *AdminServer) GetPluginStatusAPI(c *gin.Context) {
	// Named to avoid shadowing the imported "plugin" package, which this
	// file also references (plugin.CloneConfigValueMap).
	pluginRuntime := s.GetPlugin()
	if pluginRuntime == nil {
		c.JSON(http.StatusOK, gin.H{
			"enabled":          false,
			"worker_grpc_port": s.GetWorkerGrpcPort(),
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"enabled":          true,
		"configured":       pluginRuntime.IsConfigured(),
		"base_dir":         pluginRuntime.BaseDir(),
		"worker_count":     len(pluginRuntime.ListWorkers()),
		"worker_grpc_port": s.GetWorkerGrpcPort(),
	})
}
// GetPluginWorkersAPI returns currently connected plugin workers as a JSON
// array (never null).
func (s *AdminServer) GetPluginWorkersAPI(c *gin.Context) {
	connected := s.GetPluginWorkers()
	if connected == nil {
		c.JSON(http.StatusOK, []interface{}{})
		return
	}
	c.JSON(http.StatusOK, connected)
}
// GetPluginJobTypesAPI returns known plugin job types gathered from workers
// and persisted data, always as a JSON array (never null).
func (s *AdminServer) GetPluginJobTypesAPI(c *gin.Context) {
	known, err := s.ListPluginJobTypes()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if known == nil {
		c.JSON(http.StatusOK, []interface{}{})
		return
	}
	c.JSON(http.StatusOK, known)
}
// GetPluginJobsAPI returns tracked jobs for monitoring, optionally filtered
// by job_type and state; the result size defaults to 200 entries.
func (s *AdminServer) GetPluginJobsAPI(c *gin.Context) {
	typeFilter := strings.TrimSpace(c.Query("job_type"))
	stateFilter := strings.TrimSpace(c.Query("state"))
	maxJobs := parsePositiveInt(c.Query("limit"), 200)
	tracked := s.ListPluginJobs(typeFilter, stateFilter, maxJobs)
	if tracked == nil {
		c.JSON(http.StatusOK, []interface{}{})
		return
	}
	c.JSON(http.StatusOK, tracked)
}
// GetPluginJobAPI returns one tracked job by its id, or 404 when unknown.
func (s *AdminServer) GetPluginJobAPI(c *gin.Context) {
	id := strings.TrimSpace(c.Param("jobId"))
	if id == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobId is required"})
		return
	}
	if job, found := s.GetPluginJob(id); found {
		c.JSON(http.StatusOK, job)
		return
	}
	c.JSON(http.StatusNotFound, gin.H{"error": "job not found"})
}
// GetPluginJobDetailAPI returns detailed information for one tracked plugin
// job, with bounded activity (default 500) and related-job (default 20) lists.
func (s *AdminServer) GetPluginJobDetailAPI(c *gin.Context) {
	id := strings.TrimSpace(c.Param("jobId"))
	if id == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobId is required"})
		return
	}
	maxActivities := parsePositiveInt(c.Query("activity_limit"), 500)
	maxRelated := parsePositiveInt(c.Query("related_limit"), 20)
	detail, found, err := s.GetPluginJobDetail(id, maxActivities, maxRelated)
	switch {
	case err != nil:
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
	case !found || detail == nil:
		c.JSON(http.StatusNotFound, gin.H{"error": "job detail not found"})
	default:
		c.JSON(http.StatusOK, detail)
	}
}
// GetPluginActivitiesAPI returns recent plugin activities, optionally
// filtered by job_type; the limit defaults to 500 entries.
func (s *AdminServer) GetPluginActivitiesAPI(c *gin.Context) {
	typeFilter := strings.TrimSpace(c.Query("job_type"))
	maxEntries := parsePositiveInt(c.Query("limit"), 500)
	recent := s.ListPluginActivities(typeFilter, maxEntries)
	if recent == nil {
		c.JSON(http.StatusOK, []interface{}{})
		return
	}
	c.JSON(http.StatusOK, recent)
}
// GetPluginSchedulerStatesAPI returns per-job-type scheduler status for
// monitoring, optionally narrowed to a single job_type.
func (s *AdminServer) GetPluginSchedulerStatesAPI(c *gin.Context) {
	typeFilter := strings.TrimSpace(c.Query("job_type"))
	states, err := s.ListPluginSchedulerStates()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if typeFilter != "" {
		matching := make([]interface{}, 0, len(states))
		for _, entry := range states {
			if entry.JobType == typeFilter {
				matching = append(matching, entry)
			}
		}
		c.JSON(http.StatusOK, matching)
		return
	}
	if states == nil {
		c.JSON(http.StatusOK, []interface{}{})
		return
	}
	c.JSON(http.StatusOK, states)
}
// RequestPluginJobTypeSchemaAPI asks a worker for one job type schema.
// Pass force_refresh=true to bypass any previously stored descriptor.
func (s *AdminServer) RequestPluginJobTypeSchemaAPI(c *gin.Context) {
	name := strings.TrimSpace(c.Param("jobType"))
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	refresh := c.DefaultQuery("force_refresh", "false") == "true"
	ctx, cancel := context.WithTimeout(c.Request.Context(), defaultPluginDetectionTimeout)
	defer cancel()
	descriptor, err := s.RequestPluginJobTypeDescriptor(ctx, name, refresh)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	renderProtoJSON(c, http.StatusOK, descriptor)
}
// GetPluginJobTypeDescriptorAPI returns the persisted descriptor for a job
// type, or 404 when none has been stored.
func (s *AdminServer) GetPluginJobTypeDescriptorAPI(c *gin.Context) {
	name := strings.TrimSpace(c.Param("jobType"))
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	descriptor, err := s.LoadPluginJobTypeDescriptor(name)
	switch {
	case err != nil:
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
	case descriptor == nil:
		c.JSON(http.StatusNotFound, gin.H{"error": "descriptor not found"})
	default:
		renderProtoJSON(c, http.StatusOK, descriptor)
	}
}
// GetPluginJobTypeConfigAPI loads persisted config for a job type. When no
// config has been stored yet, an empty default document is returned instead
// of a 404 so the UI can render an editable form.
func (s *AdminServer) GetPluginJobTypeConfigAPI(c *gin.Context) {
	name := strings.TrimSpace(c.Param("jobType"))
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	stored, err := s.LoadPluginJobTypeConfig(name)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if stored == nil {
		stored = &plugin_pb.PersistedJobTypeConfig{
			JobType:            name,
			AdminConfigValues:  map[string]*plugin_pb.ConfigValue{},
			WorkerConfigValues: map[string]*plugin_pb.ConfigValue{},
			AdminRuntime:       &plugin_pb.AdminRuntimeConfig{},
		}
	}
	renderProtoJSON(c, http.StatusOK, stored)
}
// UpdatePluginJobTypeConfigAPI stores persisted config for a job type. The
// job type from the URL overrides any value in the body; missing maps, the
// runtime section, the update timestamp, and the editing user are filled in
// before saving.
func (s *AdminServer) UpdatePluginJobTypeConfigAPI(c *gin.Context) {
	name := strings.TrimSpace(c.Param("jobType"))
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	incoming := &plugin_pb.PersistedJobTypeConfig{}
	if err := parseProtoJSONBody(c, incoming); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	incoming.JobType = name
	if incoming.UpdatedAt == nil {
		incoming.UpdatedAt = timestamppb.Now()
	}
	if incoming.AdminRuntime == nil {
		incoming.AdminRuntime = &plugin_pb.AdminRuntimeConfig{}
	}
	if incoming.AdminConfigValues == nil {
		incoming.AdminConfigValues = map[string]*plugin_pb.ConfigValue{}
	}
	if incoming.WorkerConfigValues == nil {
		incoming.WorkerConfigValues = map[string]*plugin_pb.ConfigValue{}
	}
	editor := c.GetString("username")
	if editor == "" {
		editor = "admin"
	}
	incoming.UpdatedBy = editor
	if err := s.SavePluginJobTypeConfig(incoming); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	renderProtoJSON(c, http.StatusOK, incoming)
}
// GetPluginRunHistoryAPI returns bounded run history for a job type. A job
// type with no recorded history yields an empty, well-formed document.
func (s *AdminServer) GetPluginRunHistoryAPI(c *gin.Context) {
	name := strings.TrimSpace(c.Param("jobType"))
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	history, err := s.GetPluginRunHistory(name)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	if history != nil {
		c.JSON(http.StatusOK, history)
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"job_type":          name,
		"successful_runs":   []interface{}{},
		"error_runs":        []interface{}{},
		"last_updated_time": nil,
	})
}
// TriggerPluginDetectionAPI runs one detector for this job type and returns proposals.
//
// The optional JSON body may carry a cluster_context override, a max_results
// cap, and timeout_seconds (clamped to maxPluginDetectionTimeout). The
// response always contains whatever proposals and activities were gathered;
// on detection failure the error is embedded and the status is 500.
func (s *AdminServer) TriggerPluginDetectionAPI(c *gin.Context) {
	jobType := strings.TrimSpace(c.Param("jobType"))
	if jobType == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	var req struct {
		ClusterContext json.RawMessage `json:"cluster_context"`
		MaxResults     int32           `json:"max_results"`
		TimeoutSeconds int             `json:"timeout_seconds"`
	}
	// An empty body is allowed (io.EOF); only real decode failures are rejected.
	if err := c.ShouldBindJSON(&req); err != nil && err != io.EOF {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body: " + err.Error()})
		return
	}
	clusterContext, err := s.parseOrBuildClusterContext(req.ClusterContext)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	timeout := normalizeTimeout(req.TimeoutSeconds, defaultPluginDetectionTimeout, maxPluginDetectionTimeout)
	ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
	defer cancel()
	report, err := s.RunPluginDetectionWithReport(ctx, jobType, clusterContext, req.MaxResults)
	// Collect report fields defensively: report may be nil or partial on error.
	proposals := make([]*plugin_pb.JobProposal, 0)
	requestID := ""
	detectorWorkerID := ""
	totalProposals := int32(0)
	if report != nil {
		proposals = report.Proposals
		requestID = report.RequestID
		detectorWorkerID = report.WorkerID
		if report.Complete != nil {
			totalProposals = report.Complete.TotalProposals
		}
	}
	// Convert proposals to generic maps; individual marshal failures are
	// logged and skipped rather than failing the whole response.
	proposalPayloads := make([]map[string]interface{}, 0, len(proposals))
	for _, proposal := range proposals {
		payload, marshalErr := protoMessageToMap(proposal)
		if marshalErr != nil {
			glog.Warningf("failed to marshal proposal for jobType=%s: %v", jobType, marshalErr)
			continue
		}
		proposalPayloads = append(proposalPayloads, payload)
	}
	// Order by priority (highest first), then by proposal id for stability.
	// Unknown/missing priority names map to 0 in JobPriority_value.
	sort.Slice(proposalPayloads, func(i, j int) bool {
		iPriorityStr, _ := proposalPayloads[i]["priority"].(string)
		jPriorityStr, _ := proposalPayloads[j]["priority"].(string)
		iPriority := plugin_pb.JobPriority_value[iPriorityStr]
		jPriority := plugin_pb.JobPriority_value[jPriorityStr]
		if iPriority != jPriority {
			return iPriority > jPriority
		}
		iID, _ := proposalPayloads[i]["proposal_id"].(string)
		jID, _ := proposalPayloads[j]["proposal_id"].(string)
		return iID < jID
	})
	// Keep only activities belonging to this detection request. Iterating
	// backward reverses the stored order — presumably so the newest activity
	// comes first; confirm against ListPluginActivities ordering.
	activities := s.ListPluginActivities(jobType, 500)
	filteredActivities := make([]interface{}, 0, len(activities))
	if requestID != "" {
		for i := len(activities) - 1; i >= 0; i-- {
			activity := activities[i]
			if activity.RequestID != requestID {
				continue
			}
			filteredActivities = append(filteredActivities, activity)
		}
	}
	response := gin.H{
		"job_type":           jobType,
		"request_id":         requestID,
		"detector_worker_id": detectorWorkerID,
		"total_proposals":    totalProposals,
		"count":              len(proposalPayloads),
		"proposals":          proposalPayloads,
		"activities":         filteredActivities,
	}
	// Detection errors still return the partial payload, just with HTTP 500.
	if err != nil {
		response["error"] = err.Error()
		c.JSON(http.StatusInternalServerError, response)
		return
	}
	c.JSON(http.StatusOK, response)
}
// RunPluginJobTypeAPI runs full workflow for one job type: detect then dispatch detected jobs.
//
// Proposals already covered by active jobs are filtered out before
// execution; the remaining jobs run sequentially under one shared deadline
// (timeout_seconds, clamped to maxPluginRunTimeout). Per-job failures are
// recorded in execution_results rather than aborting the loop.
func (s *AdminServer) RunPluginJobTypeAPI(c *gin.Context) {
	jobType := strings.TrimSpace(c.Param("jobType"))
	if jobType == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "jobType is required"})
		return
	}
	var req struct {
		ClusterContext json.RawMessage `json:"cluster_context"`
		MaxResults     int32           `json:"max_results"`
		TimeoutSeconds int             `json:"timeout_seconds"`
		Attempt        int32           `json:"attempt"`
	}
	// An empty body is allowed (io.EOF); only real decode failures are rejected.
	if err := c.ShouldBindJSON(&req); err != nil && err != io.EOF {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body: " + err.Error()})
		return
	}
	// Attempt is forwarded to workers; normalize to at least 1.
	if req.Attempt < 1 {
		req.Attempt = 1
	}
	clusterContext, err := s.parseOrBuildClusterContext(req.ClusterContext)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	timeout := normalizeTimeout(req.TimeoutSeconds, defaultPluginRunTimeout, maxPluginRunTimeout)
	ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
	defer cancel()
	// Phase 1: detection.
	proposals, err := s.RunPluginDetection(ctx, jobType, clusterContext, req.MaxResults)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	detectedCount := len(proposals)
	// Phase 2: drop proposals that already have active jobs.
	filteredProposals, skippedActiveCount, err := s.FilterPluginProposalsWithActiveJobs(jobType, proposals)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	type executionResult struct {
		JobID      string                 `json:"job_id"`
		Success    bool                   `json:"success"`
		Error      string                 `json:"error,omitempty"`
		Completion map[string]interface{} `json:"completion,omitempty"`
	}
	// Phase 3: execute sequentially; a failing job is recorded but does
	// not stop the remaining jobs. All jobs share the one ctx deadline.
	results := make([]executionResult, 0, len(filteredProposals))
	successCount := 0
	errorCount := 0
	for index, proposal := range filteredProposals {
		job := buildJobSpecFromProposal(jobType, proposal, index)
		completed, execErr := s.ExecutePluginJob(ctx, job, clusterContext, req.Attempt)
		result := executionResult{
			JobID:   job.JobId,
			Success: execErr == nil,
		}
		if completed != nil {
			if payload, marshalErr := protoMessageToMap(completed); marshalErr == nil {
				result.Completion = payload
			}
		}
		if execErr != nil {
			result.Error = execErr.Error()
			errorCount++
		} else {
			successCount++
		}
		results = append(results, result)
	}
	c.JSON(http.StatusOK, gin.H{
		"job_type":               jobType,
		"detected_count":         detectedCount,
		"ready_to_execute_count": len(filteredProposals),
		"skipped_active_count":   skippedActiveCount,
		"executed_count":         len(results),
		"success_count":          successCount,
		"error_count":            errorCount,
		"execution_results":      results,
	})
}
// ExecutePluginJobAPI executes one job on a capable worker and waits for completion.
//
// The request body must contain a protojson-encoded job spec; cluster
// context and attempt are defaulted when omitted, and timeout_seconds is
// clamped to maxPluginExecutionTimeout. On failure the worker's completion
// payload, when available, is returned alongside the error.
func (s *AdminServer) ExecutePluginJobAPI(c *gin.Context) {
	var req struct {
		Job            json.RawMessage `json:"job"`
		ClusterContext json.RawMessage `json:"cluster_context"`
		Attempt        int32           `json:"attempt"`
		TimeoutSeconds int             `json:"timeout_seconds"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body: " + err.Error()})
		return
	}
	if len(req.Job) == 0 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "job is required"})
		return
	}
	// Decode the job spec leniently: unknown fields are discarded.
	job := &plugin_pb.JobSpec{}
	if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(req.Job, job); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job payload: " + err.Error()})
		return
	}
	clusterContext, err := s.parseOrBuildClusterContext(req.ClusterContext)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Attempt is forwarded to the worker; normalize to at least 1.
	if req.Attempt < 1 {
		req.Attempt = 1
	}
	timeout := normalizeTimeout(req.TimeoutSeconds, defaultPluginExecutionTimeout, maxPluginExecutionTimeout)
	ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
	defer cancel()
	completed, err := s.ExecutePluginJob(ctx, job, clusterContext, req.Attempt)
	if err != nil {
		// Include the partial completion report when the worker produced one
		// and it serializes cleanly; otherwise return the bare error.
		if completed != nil {
			payload, marshalErr := protoMessageToMap(completed)
			if marshalErr == nil {
				c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error(), "completion": payload})
				return
			}
		}
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	renderProtoJSON(c, http.StatusOK, completed)
}
// parseOrBuildClusterContext decodes a caller-supplied cluster context and
// backfills any address list the caller left empty from the admin server's
// own view of the cluster. An empty payload yields the default context.
// The metadata source is always stamped as "admin".
func (s *AdminServer) parseOrBuildClusterContext(raw json.RawMessage) (*plugin_pb.ClusterContext, error) {
	if len(raw) == 0 {
		return s.buildDefaultPluginClusterContext(), nil
	}
	parsed := &plugin_pb.ClusterContext{}
	if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(raw, parsed); err != nil {
		return nil, fmt.Errorf("invalid cluster_context payload: %w", err)
	}
	defaults := s.buildDefaultPluginClusterContext()
	if len(parsed.MasterGrpcAddresses) == 0 {
		parsed.MasterGrpcAddresses = append(parsed.MasterGrpcAddresses, defaults.MasterGrpcAddresses...)
	}
	if len(parsed.FilerGrpcAddresses) == 0 {
		parsed.FilerGrpcAddresses = append(parsed.FilerGrpcAddresses, defaults.FilerGrpcAddresses...)
	}
	if len(parsed.VolumeGrpcAddresses) == 0 {
		parsed.VolumeGrpcAddresses = append(parsed.VolumeGrpcAddresses, defaults.VolumeGrpcAddresses...)
	}
	if parsed.Metadata == nil {
		parsed.Metadata = map[string]string{}
	}
	parsed.Metadata["source"] = "admin"
	return parsed, nil
}
// buildDefaultPluginClusterContext assembles a cluster context from the
// admin server's own view: the current master, all known filers, and all
// known volume servers — each deduplicated and sorted for determinism.
// NOTE(review): the fields are named *GrpcAddresses but GetAllFilers and
// GetDisplayAddress may yield non-gRPC address forms — confirm what the
// plugin workers expect.
func (s *AdminServer) buildDefaultPluginClusterContext() *plugin_pb.ClusterContext {
	clusterContext := &plugin_pb.ClusterContext{
		MasterGrpcAddresses: make([]string, 0),
		FilerGrpcAddresses:  make([]string, 0),
		VolumeGrpcAddresses: make([]string, 0),
		Metadata: map[string]string{
			"source": "admin",
		},
	}
	masterAddress := string(s.masterClient.GetMaster(context.Background()))
	if masterAddress != "" {
		clusterContext.MasterGrpcAddresses = append(clusterContext.MasterGrpcAddresses, masterAddress)
	}
	// Deduplicate filers while preserving only non-empty entries.
	filerSeen := map[string]struct{}{}
	for _, filer := range s.GetAllFilers() {
		filer = strings.TrimSpace(filer)
		if filer == "" {
			continue
		}
		if _, exists := filerSeen[filer]; exists {
			continue
		}
		filerSeen[filer] = struct{}{}
		clusterContext.FilerGrpcAddresses = append(clusterContext.FilerGrpcAddresses, filer)
	}
	// Volume servers are best effort: a lookup failure only logs at V(1).
	volumeSeen := map[string]struct{}{}
	if volumeServers, err := s.GetClusterVolumeServers(); err == nil {
		for _, server := range volumeServers.VolumeServers {
			// Prefer the display address, fall back to the raw address.
			address := strings.TrimSpace(server.GetDisplayAddress())
			if address == "" {
				address = strings.TrimSpace(server.Address)
			}
			if address == "" {
				continue
			}
			if _, exists := volumeSeen[address]; exists {
				continue
			}
			volumeSeen[address] = struct{}{}
			clusterContext.VolumeGrpcAddresses = append(clusterContext.VolumeGrpcAddresses, address)
		}
	} else {
		glog.V(1).Infof("failed to build default plugin volume context: %v", err)
	}
	// Sort all lists so repeated calls produce identical contexts.
	sort.Strings(clusterContext.MasterGrpcAddresses)
	sort.Strings(clusterContext.FilerGrpcAddresses)
	sort.Strings(clusterContext.VolumeGrpcAddresses)
	return clusterContext
}
// parseProtoJSONBodyMaxBytes caps request bodies decoded by parseProtoJSONBody.
const parseProtoJSONBodyMaxBytes = 1 << 20 // 1 MB

// parseProtoJSONBody decodes the request body as protojson into message,
// rejecting empty bodies and bodies larger than parseProtoJSONBodyMaxBytes.
// Unknown fields are discarded for forward compatibility.
func parseProtoJSONBody(c *gin.Context, message proto.Message) error {
	bounded := http.MaxBytesReader(c.Writer, c.Request.Body, parseProtoJSONBodyMaxBytes)
	data, err := io.ReadAll(bounded)
	if err != nil {
		return fmt.Errorf("failed to read request body: %w", err)
	}
	if len(data) == 0 {
		return fmt.Errorf("request body is empty")
	}
	if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(data, message); err != nil {
		return fmt.Errorf("invalid protobuf json: %w", err)
	}
	return nil
}
// renderProtoJSON writes message as canonical protojson (proto field names,
// zero values included) with an application/json content type.
func renderProtoJSON(c *gin.Context, statusCode int, message proto.Message) {
	opts := protojson.MarshalOptions{
		UseProtoNames:   true,
		EmitUnpopulated: true,
	}
	payload, err := opts.Marshal(message)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to encode response: " + err.Error()})
		return
	}
	c.Data(statusCode, "application/json", payload)
}
// protoMessageToMap round-trips message through protojson into a generic map
// so it can be embedded inside a gin.H response.
func protoMessageToMap(message proto.Message) (map[string]interface{}, error) {
	encoded, err := protojson.MarshalOptions{UseProtoNames: true}.Marshal(message)
	if err != nil {
		return nil, err
	}
	decoded := map[string]interface{}{}
	if err := json.Unmarshal(encoded, &decoded); err != nil {
		return nil, err
	}
	return decoded, nil
}
func normalizeTimeout(timeoutSeconds int, defaultTimeout, maxTimeout time.Duration) time.Duration {
if timeoutSeconds <= 0 {
return defaultTimeout
}
timeout := time.Duration(timeoutSeconds) * time.Second
if timeout > maxTimeout {
return maxTimeout
}
return timeout
}
// buildJobSpecFromProposal creates a fresh JobSpec for one detected
// proposal. The job id is newly generated (type + timestamp + random
// suffix) and deliberately never reuses the proposal id; priority, dedupe
// key, parameters, and labels are copied over when the proposal supplies
// them.
func buildJobSpecFromProposal(jobType string, proposal *plugin_pb.JobProposal, index int) *plugin_pb.JobSpec {
	now := timestamppb.Now()
	suffix := make([]byte, 4)
	if _, err := rand.Read(suffix); err != nil {
		// Fallback to simpler ID if rand fails
		suffix = []byte(fmt.Sprintf("%d", index))
	}
	jobID := fmt.Sprintf("%s-%d-%s", jobType, now.AsTime().UnixNano(), hex.EncodeToString(suffix))
	jobSpec := &plugin_pb.JobSpec{
		JobId:      jobID,
		JobType:    jobType,
		Priority:   plugin_pb.JobPriority_JOB_PRIORITY_NORMAL,
		CreatedAt:  now,
		Labels:     make(map[string]string),
		Parameters: make(map[string]*plugin_pb.ConfigValue),
		DedupeKey:  "",
	}
	if proposal != nil {
		jobSpec.Summary = proposal.Summary
		jobSpec.Detail = proposal.Detail
		// Keep the default NORMAL priority unless the proposal sets one.
		if proposal.Priority != plugin_pb.JobPriority_JOB_PRIORITY_UNSPECIFIED {
			jobSpec.Priority = proposal.Priority
		}
		jobSpec.DedupeKey = proposal.DedupeKey
		// Copy parameters via the plugin helper so the job does not alias
		// the proposal's map — presumably a deep clone; confirm in the
		// plugin package.
		jobSpec.Parameters = plugin.CloneConfigValueMap(proposal.Parameters)
		if proposal.Labels != nil {
			for k, v := range proposal.Labels {
				jobSpec.Labels[k] = v
			}
		}
	}
	return jobSpec
}
// parsePositiveInt parses raw as a base-10 integer after trimming surrounding
// whitespace. Any parse failure or non-positive result yields defaultValue.
func parsePositiveInt(raw string, defaultValue int) int {
	parsed, parseErr := strconv.Atoi(strings.TrimSpace(raw))
	if parseErr != nil {
		return defaultValue
	}
	if parsed <= 0 {
		return defaultValue
	}
	return parsed
}
// cloneConfigValueMap is now exported by the plugin package as CloneConfigValueMap

View File

@@ -0,0 +1,33 @@
package dash
import (
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestBuildJobSpecFromProposalDoesNotReuseProposalID verifies that specs built
// from the same proposal get unique, freshly generated job IDs while the
// proposal's dedupe key is carried over unchanged.
func TestBuildJobSpecFromProposalDoesNotReuseProposalID(t *testing.T) {
	t.Parallel()
	proposal := &plugin_pb.JobProposal{
		ProposalId: "vacuum-2",
		DedupeKey:  "vacuum:2",
		JobType:    "vacuum",
	}
	first := buildJobSpecFromProposal("vacuum", proposal, 0)
	second := buildJobSpecFromProposal("vacuum", proposal, 1)
	for _, spec := range []*plugin_pb.JobSpec{first, second} {
		if spec.JobId == proposal.ProposalId {
			t.Fatalf("job id must not reuse proposal id: %s", spec.JobId)
		}
	}
	if first.JobId == second.JobId {
		t.Fatalf("job ids must be unique across jobs: %s", first.JobId)
	}
	if first.DedupeKey != proposal.DedupeKey {
		t.Fatalf("dedupe key must be preserved: got=%s want=%s", first.DedupeKey, proposal.DedupeKey)
	}
}

View File

@@ -5,12 +5,14 @@ import (
"fmt" "fmt"
"io" "io"
"net" "net"
"strconv"
"sync" "sync"
"time" "time"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance" "github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
@@ -93,6 +95,10 @@ func (s *WorkerGrpcServer) StartWithTLS(port int) error {
grpcServer := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.admin")) grpcServer := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.admin"))
worker_pb.RegisterWorkerServiceServer(grpcServer, s) worker_pb.RegisterWorkerServiceServer(grpcServer, s)
if plugin := s.adminServer.GetPlugin(); plugin != nil {
plugin_pb.RegisterPluginControlServiceServer(grpcServer, plugin)
glog.V(0).Infof("Plugin gRPC service registered on worker gRPC server")
}
s.grpcServer = grpcServer s.grpcServer = grpcServer
s.listener = listener s.listener = listener
@@ -114,6 +120,25 @@ func (s *WorkerGrpcServer) StartWithTLS(port int) error {
return nil return nil
} }
// ListenPort returns the currently bound worker gRPC listen port.
// It returns 0 when the server or listener is not set, or when the
// listener's address cannot be resolved to a numeric port.
func (s *WorkerGrpcServer) ListenPort() int {
	if s == nil || s.listener == nil {
		return 0
	}
	addr := s.listener.Addr()
	// Fast path: TCP listeners expose the port directly.
	if tcpAddr, ok := addr.(*net.TCPAddr); ok {
		return tcpAddr.Port
	}
	// Fallback: parse the textual "host:port" form of the address.
	_, portText, splitErr := net.SplitHostPort(addr.String())
	if splitErr != nil {
		return 0
	}
	parsed, parseErr := strconv.Atoi(portText)
	if parseErr != nil {
		return 0
	}
	return parsed
}
// Stop stops the gRPC server // Stop stops the gRPC server
func (s *WorkerGrpcServer) Stop() error { func (s *WorkerGrpcServer) Stop() error {
if !s.running { if !s.running {

View File

@@ -23,7 +23,7 @@ type AdminHandlers struct {
fileBrowserHandlers *FileBrowserHandlers fileBrowserHandlers *FileBrowserHandlers
userHandlers *UserHandlers userHandlers *UserHandlers
policyHandlers *PolicyHandlers policyHandlers *PolicyHandlers
maintenanceHandlers *MaintenanceHandlers pluginHandlers *PluginHandlers
mqHandlers *MessageQueueHandlers mqHandlers *MessageQueueHandlers
serviceAccountHandlers *ServiceAccountHandlers serviceAccountHandlers *ServiceAccountHandlers
} }
@@ -35,7 +35,7 @@ func NewAdminHandlers(adminServer *dash.AdminServer) *AdminHandlers {
fileBrowserHandlers := NewFileBrowserHandlers(adminServer) fileBrowserHandlers := NewFileBrowserHandlers(adminServer)
userHandlers := NewUserHandlers(adminServer) userHandlers := NewUserHandlers(adminServer)
policyHandlers := NewPolicyHandlers(adminServer) policyHandlers := NewPolicyHandlers(adminServer)
maintenanceHandlers := NewMaintenanceHandlers(adminServer) pluginHandlers := NewPluginHandlers(adminServer)
mqHandlers := NewMessageQueueHandlers(adminServer) mqHandlers := NewMessageQueueHandlers(adminServer)
serviceAccountHandlers := NewServiceAccountHandlers(adminServer) serviceAccountHandlers := NewServiceAccountHandlers(adminServer)
return &AdminHandlers{ return &AdminHandlers{
@@ -45,7 +45,7 @@ func NewAdminHandlers(adminServer *dash.AdminServer) *AdminHandlers {
fileBrowserHandlers: fileBrowserHandlers, fileBrowserHandlers: fileBrowserHandlers,
userHandlers: userHandlers, userHandlers: userHandlers,
policyHandlers: policyHandlers, policyHandlers: policyHandlers,
maintenanceHandlers: maintenanceHandlers, pluginHandlers: pluginHandlers,
mqHandlers: mqHandlers, mqHandlers: mqHandlers,
serviceAccountHandlers: serviceAccountHandlers, serviceAccountHandlers: serviceAccountHandlers,
} }
@@ -119,14 +119,12 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, adminUser,
protected.GET("/mq/topics", h.mqHandlers.ShowTopics) protected.GET("/mq/topics", h.mqHandlers.ShowTopics)
protected.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails) protected.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails)
// Maintenance system routes protected.GET("/plugin", h.pluginHandlers.ShowPlugin)
protected.GET("/maintenance", h.maintenanceHandlers.ShowMaintenanceQueue) protected.GET("/plugin/configuration", h.pluginHandlers.ShowPluginConfiguration)
protected.GET("/maintenance/workers", h.maintenanceHandlers.ShowMaintenanceWorkers) protected.GET("/plugin/queue", h.pluginHandlers.ShowPluginQueue)
protected.GET("/maintenance/config", h.maintenanceHandlers.ShowMaintenanceConfig) protected.GET("/plugin/detection", h.pluginHandlers.ShowPluginDetection)
protected.POST("/maintenance/config", dash.RequireWriteAccess(), h.maintenanceHandlers.UpdateMaintenanceConfig) protected.GET("/plugin/execution", h.pluginHandlers.ShowPluginExecution)
protected.GET("/maintenance/config/:taskType", h.maintenanceHandlers.ShowTaskConfig) protected.GET("/plugin/monitoring", h.pluginHandlers.ShowPluginMonitoring)
protected.POST("/maintenance/config/:taskType", dash.RequireWriteAccess(), h.maintenanceHandlers.UpdateTaskConfig)
protected.GET("/maintenance/tasks/:id", h.maintenanceHandlers.ShowTaskDetail)
// API routes for AJAX calls // API routes for AJAX calls
api := r.Group("/api") api := r.Group("/api")
@@ -226,20 +224,25 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, adminUser,
volumeApi.POST("/:id/:server/vacuum", dash.RequireWriteAccess(), h.clusterHandlers.VacuumVolume) volumeApi.POST("/:id/:server/vacuum", dash.RequireWriteAccess(), h.clusterHandlers.VacuumVolume)
} }
// Maintenance API routes // Plugin API routes
maintenanceApi := api.Group("/maintenance") pluginApi := api.Group("/plugin")
{ {
maintenanceApi.POST("/scan", dash.RequireWriteAccess(), h.adminServer.TriggerMaintenanceScan) pluginApi.GET("/status", h.adminServer.GetPluginStatusAPI)
maintenanceApi.GET("/tasks", h.adminServer.GetMaintenanceTasks) pluginApi.GET("/workers", h.adminServer.GetPluginWorkersAPI)
maintenanceApi.GET("/tasks/:id", h.adminServer.GetMaintenanceTask) pluginApi.GET("/job-types", h.adminServer.GetPluginJobTypesAPI)
maintenanceApi.GET("/tasks/:id/detail", h.adminServer.GetMaintenanceTaskDetailAPI) pluginApi.GET("/jobs", h.adminServer.GetPluginJobsAPI)
maintenanceApi.POST("/tasks/:id/cancel", dash.RequireWriteAccess(), h.adminServer.CancelMaintenanceTask) pluginApi.GET("/jobs/:jobId", h.adminServer.GetPluginJobAPI)
maintenanceApi.GET("/workers", h.adminServer.GetMaintenanceWorkersAPI) pluginApi.GET("/jobs/:jobId/detail", h.adminServer.GetPluginJobDetailAPI)
maintenanceApi.GET("/workers/:id", h.adminServer.GetMaintenanceWorker) pluginApi.GET("/activities", h.adminServer.GetPluginActivitiesAPI)
maintenanceApi.GET("/workers/:id/logs", h.adminServer.GetWorkerLogs) pluginApi.GET("/scheduler-states", h.adminServer.GetPluginSchedulerStatesAPI)
maintenanceApi.GET("/stats", h.adminServer.GetMaintenanceStats) pluginApi.GET("/job-types/:jobType/descriptor", h.adminServer.GetPluginJobTypeDescriptorAPI)
maintenanceApi.GET("/config", h.adminServer.GetMaintenanceConfigAPI) pluginApi.POST("/job-types/:jobType/schema", h.adminServer.RequestPluginJobTypeSchemaAPI)
maintenanceApi.PUT("/config", dash.RequireWriteAccess(), h.adminServer.UpdateMaintenanceConfigAPI) pluginApi.GET("/job-types/:jobType/config", h.adminServer.GetPluginJobTypeConfigAPI)
pluginApi.PUT("/job-types/:jobType/config", dash.RequireWriteAccess(), h.adminServer.UpdatePluginJobTypeConfigAPI)
pluginApi.GET("/job-types/:jobType/runs", h.adminServer.GetPluginRunHistoryAPI)
pluginApi.POST("/job-types/:jobType/detect", dash.RequireWriteAccess(), h.adminServer.TriggerPluginDetectionAPI)
pluginApi.POST("/job-types/:jobType/run", dash.RequireWriteAccess(), h.adminServer.RunPluginJobTypeAPI)
pluginApi.POST("/jobs/execute", dash.RequireWriteAccess(), h.adminServer.ExecutePluginJobAPI)
} }
// Message Queue API routes // Message Queue API routes
@@ -292,14 +295,12 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, adminUser,
r.GET("/mq/topics", h.mqHandlers.ShowTopics) r.GET("/mq/topics", h.mqHandlers.ShowTopics)
r.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails) r.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails)
// Maintenance system routes r.GET("/plugin", h.pluginHandlers.ShowPlugin)
r.GET("/maintenance", h.maintenanceHandlers.ShowMaintenanceQueue) r.GET("/plugin/configuration", h.pluginHandlers.ShowPluginConfiguration)
r.GET("/maintenance/workers", h.maintenanceHandlers.ShowMaintenanceWorkers) r.GET("/plugin/queue", h.pluginHandlers.ShowPluginQueue)
r.GET("/maintenance/config", h.maintenanceHandlers.ShowMaintenanceConfig) r.GET("/plugin/detection", h.pluginHandlers.ShowPluginDetection)
r.POST("/maintenance/config", h.maintenanceHandlers.UpdateMaintenanceConfig) r.GET("/plugin/execution", h.pluginHandlers.ShowPluginExecution)
r.GET("/maintenance/config/:taskType", h.maintenanceHandlers.ShowTaskConfig) r.GET("/plugin/monitoring", h.pluginHandlers.ShowPluginMonitoring)
r.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig)
r.GET("/maintenance/tasks/:id", h.maintenanceHandlers.ShowTaskDetail)
// API routes for AJAX calls // API routes for AJAX calls
api := r.Group("/api") api := r.Group("/api")
@@ -398,20 +399,25 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, adminUser,
volumeApi.POST("/:id/:server/vacuum", h.clusterHandlers.VacuumVolume) volumeApi.POST("/:id/:server/vacuum", h.clusterHandlers.VacuumVolume)
} }
// Maintenance API routes // Plugin API routes
maintenanceApi := api.Group("/maintenance") pluginApi := api.Group("/plugin")
{ {
maintenanceApi.POST("/scan", h.adminServer.TriggerMaintenanceScan) pluginApi.GET("/status", h.adminServer.GetPluginStatusAPI)
maintenanceApi.GET("/tasks", h.adminServer.GetMaintenanceTasks) pluginApi.GET("/workers", h.adminServer.GetPluginWorkersAPI)
maintenanceApi.GET("/tasks/:id", h.adminServer.GetMaintenanceTask) pluginApi.GET("/job-types", h.adminServer.GetPluginJobTypesAPI)
maintenanceApi.GET("/tasks/:id/detail", h.adminServer.GetMaintenanceTaskDetailAPI) pluginApi.GET("/jobs", h.adminServer.GetPluginJobsAPI)
maintenanceApi.POST("/tasks/:id/cancel", h.adminServer.CancelMaintenanceTask) pluginApi.GET("/jobs/:jobId", h.adminServer.GetPluginJobAPI)
maintenanceApi.GET("/workers", h.adminServer.GetMaintenanceWorkersAPI) pluginApi.GET("/jobs/:jobId/detail", h.adminServer.GetPluginJobDetailAPI)
maintenanceApi.GET("/workers/:id", h.adminServer.GetMaintenanceWorker) pluginApi.GET("/activities", h.adminServer.GetPluginActivitiesAPI)
maintenanceApi.GET("/workers/:id/logs", h.adminServer.GetWorkerLogs) pluginApi.GET("/scheduler-states", h.adminServer.GetPluginSchedulerStatesAPI)
maintenanceApi.GET("/stats", h.adminServer.GetMaintenanceStats) pluginApi.GET("/job-types/:jobType/descriptor", h.adminServer.GetPluginJobTypeDescriptorAPI)
maintenanceApi.GET("/config", h.adminServer.GetMaintenanceConfigAPI) pluginApi.POST("/job-types/:jobType/schema", h.adminServer.RequestPluginJobTypeSchemaAPI)
maintenanceApi.PUT("/config", h.adminServer.UpdateMaintenanceConfigAPI) pluginApi.GET("/job-types/:jobType/config", h.adminServer.GetPluginJobTypeConfigAPI)
pluginApi.PUT("/job-types/:jobType/config", h.adminServer.UpdatePluginJobTypeConfigAPI)
pluginApi.GET("/job-types/:jobType/runs", h.adminServer.GetPluginRunHistoryAPI)
pluginApi.POST("/job-types/:jobType/detect", h.adminServer.TriggerPluginDetectionAPI)
pluginApi.POST("/job-types/:jobType/run", h.adminServer.RunPluginJobTypeAPI)
pluginApi.POST("/jobs/execute", h.adminServer.ExecutePluginJobAPI)
} }
// Message Queue API routes // Message Queue API routes

View File

@@ -0,0 +1,95 @@
package handlers
import (
"testing"
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
)
// TestSetupRoutes_RegistersPluginSchedulerStatesAPI_NoAuth verifies that the
// plugin scheduler-states and job-detail APIs are wired up with auth disabled.
func TestSetupRoutes_RegistersPluginSchedulerStatesAPI_NoAuth(t *testing.T) {
	gin.SetMode(gin.TestMode)
	engine := gin.New()
	handlers := newRouteTestAdminHandlers()
	handlers.SetupRoutes(engine, false, "", "", "", "", true)
	if !hasRoute(engine, "GET", "/api/plugin/scheduler-states") {
		t.Fatalf("expected GET /api/plugin/scheduler-states to be registered in no-auth mode")
	}
	if !hasRoute(engine, "GET", "/api/plugin/jobs/:jobId/detail") {
		t.Fatalf("expected GET /api/plugin/jobs/:jobId/detail to be registered in no-auth mode")
	}
}
// TestSetupRoutes_RegistersPluginSchedulerStatesAPI_WithAuth verifies that the
// plugin scheduler-states and job-detail APIs are wired up with auth enabled.
func TestSetupRoutes_RegistersPluginSchedulerStatesAPI_WithAuth(t *testing.T) {
	gin.SetMode(gin.TestMode)
	engine := gin.New()
	handlers := newRouteTestAdminHandlers()
	handlers.SetupRoutes(engine, true, "admin", "password", "", "", true)
	if !hasRoute(engine, "GET", "/api/plugin/scheduler-states") {
		t.Fatalf("expected GET /api/plugin/scheduler-states to be registered in auth mode")
	}
	if !hasRoute(engine, "GET", "/api/plugin/jobs/:jobId/detail") {
		t.Fatalf("expected GET /api/plugin/jobs/:jobId/detail to be registered in auth mode")
	}
}
// TestSetupRoutes_RegistersPluginPages_NoAuth checks every plugin UI page
// route is registered when auth is disabled.
func TestSetupRoutes_RegistersPluginPages_NoAuth(t *testing.T) {
	gin.SetMode(gin.TestMode)
	engine := gin.New()
	newRouteTestAdminHandlers().SetupRoutes(engine, false, "", "", "", "", true)
	for _, page := range []string{
		"/plugin",
		"/plugin/configuration",
		"/plugin/queue",
		"/plugin/detection",
		"/plugin/execution",
		"/plugin/monitoring",
	} {
		assertHasRoute(t, engine, "GET", page)
	}
}
// TestSetupRoutes_RegistersPluginPages_WithAuth checks every plugin UI page
// route is registered when auth is enabled.
func TestSetupRoutes_RegistersPluginPages_WithAuth(t *testing.T) {
	gin.SetMode(gin.TestMode)
	engine := gin.New()
	newRouteTestAdminHandlers().SetupRoutes(engine, true, "admin", "password", "", "", true)
	for _, page := range []string{
		"/plugin",
		"/plugin/configuration",
		"/plugin/queue",
		"/plugin/detection",
		"/plugin/execution",
		"/plugin/monitoring",
	} {
		assertHasRoute(t, engine, "GET", page)
	}
}
// newRouteTestAdminHandlers builds an AdminHandlers wired to a bare
// AdminServer so route registration can be exercised without any backing
// services or network access.
func newRouteTestAdminHandlers() *AdminHandlers {
	server := &dash.AdminServer{}
	handlers := &AdminHandlers{
		adminServer:            server,
		authHandlers:           &AuthHandlers{adminServer: server},
		clusterHandlers:        &ClusterHandlers{adminServer: server},
		fileBrowserHandlers:    &FileBrowserHandlers{adminServer: server},
		userHandlers:           &UserHandlers{adminServer: server},
		policyHandlers:         &PolicyHandlers{adminServer: server},
		pluginHandlers:         &PluginHandlers{adminServer: server},
		mqHandlers:             &MessageQueueHandlers{adminServer: server},
		serviceAccountHandlers: &ServiceAccountHandlers{adminServer: server},
	}
	return handlers
}
// hasRoute reports whether the engine has a registered route whose method and
// path both match exactly.
func hasRoute(router *gin.Engine, method string, path string) bool {
	for _, registered := range router.Routes() {
		if registered.Method != method {
			continue
		}
		if registered.Path == path {
			return true
		}
	}
	return false
}
// assertHasRoute fails the test immediately when the given method/path pair is
// not registered on the engine.
func assertHasRoute(t *testing.T, router *gin.Engine, method string, path string) {
	t.Helper()
	if hasRoute(router, method, path) {
		return
	}
	t.Fatalf("expected %s %s to be registered", method, path)
}

View File

@@ -1,550 +0,0 @@
package handlers
import (
"context"
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// MaintenanceHandlers handles maintenance-related HTTP requests
type MaintenanceHandlers struct {
	adminServer *dash.AdminServer // backing server used for data access and config persistence
}
// NewMaintenanceHandlers creates a new instance of MaintenanceHandlers
// bound to the given admin server (which may be nil; handlers guard for it).
func NewMaintenanceHandlers(adminServer *dash.AdminServer) *MaintenanceHandlers {
	handlers := &MaintenanceHandlers{}
	handlers.adminServer = adminServer
	return handlers
}
// ShowTaskDetail displays the task detail page
// for the task identified by the :id URL parameter.
func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) {
	taskID := c.Param("id")
	if h.adminServer == nil {
		c.String(http.StatusInternalServerError, "Admin server not initialized")
		return
	}
	detail, lookupErr := h.adminServer.GetMaintenanceTaskDetail(taskID)
	if lookupErr != nil {
		glog.Errorf("DEBUG ShowTaskDetail: error getting task detail for %s: %v", taskID, lookupErr)
		c.String(http.StatusNotFound, "Task not found: %s (Error: %v)", taskID, lookupErr)
		return
	}
	c.Header("Content-Type", "text/html")
	page := layout.Layout(c, app.TaskDetail(detail))
	if renderErr := page.Render(c.Request.Context(), c.Writer); renderErr != nil {
		glog.Errorf("DEBUG ShowTaskDetail: render error: %v", renderErr)
		c.String(http.StatusInternalServerError, "Failed to render template: %v", renderErr)
		return
	}
}
// ShowMaintenanceQueue displays the maintenance queue page.
// Data retrieval runs in a goroutine raced against a 30s deadline so a slow
// backend cannot hang the HTTP handler; on timeout the client gets a 408.
func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) {
	// Add timeout to prevent hanging
	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
	defer cancel()
	// Use a channel to handle timeout for data retrieval
	type result struct {
		data *maintenance.MaintenanceQueueData
		err  error
	}
	// Buffered (capacity 1) so the fetch goroutine can always deliver its
	// result and exit, even if we already returned on the timeout branch.
	resultChan := make(chan result, 1)
	go func() {
		data, err := h.getMaintenanceQueueData()
		resultChan <- result{data: data, err: err}
	}()
	select {
	case res := <-resultChan:
		if res.err != nil {
			glog.V(1).Infof("ShowMaintenanceQueue: error getting data: %v", res.err)
			c.JSON(http.StatusInternalServerError, gin.H{"error": res.err.Error()})
			return
		}
		glog.V(2).Infof("ShowMaintenanceQueue: got data with %d tasks", len(res.data.Tasks))
		// Render HTML template
		c.Header("Content-Type", "text/html")
		maintenanceComponent := app.MaintenanceQueue(res.data)
		layoutComponent := layout.Layout(c, maintenanceComponent)
		// Rendering uses the deadline-bound ctx so it is also cut off at 30s.
		err := layoutComponent.Render(ctx, c.Writer)
		if err != nil {
			glog.V(1).Infof("ShowMaintenanceQueue: render error: %v", err)
			c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
			return
		}
		glog.V(3).Infof("ShowMaintenanceQueue: template rendered successfully")
	case <-ctx.Done():
		glog.Warningf("ShowMaintenanceQueue: timeout waiting for data")
		c.JSON(http.StatusRequestTimeout, gin.H{
			"error":      "Request timeout - maintenance data retrieval took too long. This may indicate a system issue.",
			"suggestion": "Try refreshing the page or contact system administrator if the problem persists.",
		})
		return
	}
}
// ShowMaintenanceWorkers displays the maintenance workers page.
func (h *MaintenanceHandlers) ShowMaintenanceWorkers(c *gin.Context) {
	if h.adminServer == nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Admin server not initialized"})
		return
	}
	workersData, dataErr := h.adminServer.GetMaintenanceWorkersData()
	if dataErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": dataErr.Error()})
		return
	}
	// Render the workers view inside the shared page layout.
	c.Header("Content-Type", "text/html")
	page := layout.Layout(c, app.MaintenanceWorkers(workersData))
	if renderErr := page.Render(c.Request.Context(), c.Writer); renderErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + renderErr.Error()})
		return
	}
}
// ShowMaintenanceConfig displays the maintenance configuration page.
func (h *MaintenanceHandlers) ShowMaintenanceConfig(c *gin.Context) {
	configData, cfgErr := h.getMaintenanceConfig()
	if cfgErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": cfgErr.Error()})
		return
	}
	// The schema drives dynamic form rendering on the page.
	schema := maintenance.GetMaintenanceConfigSchema()
	c.Header("Content-Type", "text/html")
	page := layout.Layout(c, app.MaintenanceConfigSchema(configData, schema))
	if renderErr := page.Render(c.Request.Context(), c.Writer); renderErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + renderErr.Error()})
		return
	}
}
// ShowTaskConfig displays the configuration page for a specific task type
// (the :taskType URL parameter), rendering a schema-driven form pre-filled
// with the task's currently saved configuration.
func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
	taskTypeName := c.Param("taskType")
	// Get the schema for this task type
	schema := tasks.GetTaskConfigSchema(taskTypeName)
	if schema == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found or no schema available"})
		return
	}
	// Get the UI provider for current configuration
	uiRegistry := tasks.GetGlobalUIRegistry()
	typesRegistry := tasks.GetGlobalTypesRegistry()
	var provider types.TaskUIProvider
	// Registries key by the worker task type; match it to the URL parameter
	// by string comparison.
	for workerTaskType := range typesRegistry.GetAllDetectors() {
		if string(workerTaskType) == taskTypeName {
			provider = uiRegistry.GetProvider(workerTaskType)
			break
		}
	}
	if provider == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "UI provider not found for task type"})
		return
	}
	// Get current configuration
	currentConfig := provider.GetCurrentConfig()
	// Note: Do NOT apply schema defaults to current config as it overrides saved values
	// Only apply defaults when creating new configs, not when displaying existing ones
	// Create task configuration data
	configData := &maintenance.TaskConfigData{
		TaskType:    maintenance.MaintenanceTaskType(taskTypeName),
		TaskName:    schema.DisplayName,
		TaskIcon:    schema.Icon,
		Description: schema.Description,
	}
	// Render HTML template using schema-based approach
	c.Header("Content-Type", "text/html")
	taskConfigComponent := app.TaskConfigSchema(configData, schema, currentConfig)
	layoutComponent := layout.Layout(c, taskConfigComponent)
	err := layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}
// UpdateTaskConfig updates task configuration from form.
//
// Flow: parse the posted form; build a typed config for the task type with
// schema defaults applied; overlay the currently-saved values (so fields not
// on the form are preserved); overlay the submitted form values; validate;
// apply via the UI provider; persist to protobuf (best-effort); then ask the
// maintenance manager to reload and redirect back to the config page.
func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
	taskTypeName := c.Param("taskType")
	taskType := types.TaskType(taskTypeName)
	// Parse form data
	err := c.Request.ParseForm()
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse form data: " + err.Error()})
		return
	}
	// Debug logging - show received form data
	glog.V(1).Infof("Received form data for task type %s:", taskTypeName)
	for key, values := range c.Request.PostForm {
		glog.V(1).Infof(" %s: %v", key, values)
	}
	// Get the task configuration schema
	schema := tasks.GetTaskConfigSchema(taskTypeName)
	if schema == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "Schema not found for task type: " + taskTypeName})
		return
	}
	// Create a new config instance based on task type and apply schema defaults
	var config TaskConfig
	switch taskType {
	case types.TaskTypeVacuum:
		config = &vacuum.Config{}
	case types.TaskTypeBalance:
		config = &balance.Config{}
	case types.TaskTypeErasureCoding:
		config = &erasure_coding.Config{}
	default:
		c.JSON(http.StatusBadRequest, gin.H{"error": "Unsupported task type: " + taskTypeName})
		return
	}
	// Apply schema defaults first using type-safe method
	if err := schema.ApplyDefaultsToConfig(config); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply defaults: " + err.Error()})
		return
	}
	// First, get the current configuration to preserve existing values
	currentUIRegistry := tasks.GetGlobalUIRegistry()
	currentTypesRegistry := tasks.GetGlobalTypesRegistry()
	var currentProvider types.TaskUIProvider
	for workerTaskType := range currentTypesRegistry.GetAllDetectors() {
		if string(workerTaskType) == string(taskType) {
			currentProvider = currentUIRegistry.GetProvider(workerTaskType)
			break
		}
	}
	if currentProvider != nil {
		// Copy current config values to the new config
		currentConfig := currentProvider.GetCurrentConfig()
		if currentConfigProtobuf, ok := currentConfig.(TaskConfig); ok {
			// Apply current values using protobuf directly - no map conversion needed!
			currentPolicy := currentConfigProtobuf.ToTaskPolicy()
			if err := config.FromTaskPolicy(currentPolicy); err != nil {
				glog.Warningf("Failed to load current config for %s: %v", taskTypeName, err)
			}
		}
	}
	// Parse form data using schema-based approach (this will override with new values)
	err = h.parseTaskConfigFromForm(c.Request.PostForm, schema, config)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
		return
	}
	// Debug logging - show parsed config values
	switch taskType {
	case types.TaskTypeVacuum:
		if vacuumConfig, ok := config.(*vacuum.Config); ok {
			glog.V(1).Infof("Parsed vacuum config - GarbageThreshold: %f, MinVolumeAgeSeconds: %d, MinIntervalSeconds: %d",
				vacuumConfig.GarbageThreshold, vacuumConfig.MinVolumeAgeSeconds, vacuumConfig.MinIntervalSeconds)
		}
	case types.TaskTypeErasureCoding:
		if ecConfig, ok := config.(*erasure_coding.Config); ok {
			glog.V(1).Infof("Parsed EC config - FullnessRatio: %f, QuietForSeconds: %d, MinSizeMB: %d, CollectionFilter: '%s'",
				ecConfig.FullnessRatio, ecConfig.QuietForSeconds, ecConfig.MinSizeMB, ecConfig.CollectionFilter)
		}
	case types.TaskTypeBalance:
		if balanceConfig, ok := config.(*balance.Config); ok {
			glog.V(1).Infof("Parsed balance config - Enabled: %v, MaxConcurrent: %d, ScanIntervalSeconds: %d, ImbalanceThreshold: %f, MinServerCount: %d",
				balanceConfig.Enabled, balanceConfig.MaxConcurrent, balanceConfig.ScanIntervalSeconds, balanceConfig.ImbalanceThreshold, balanceConfig.MinServerCount)
		}
	}
	// Validate the configuration
	if validationErrors := schema.ValidateConfig(config); len(validationErrors) > 0 {
		errorMessages := make([]string, len(validationErrors))
		for i, err := range validationErrors {
			errorMessages[i] = err.Error()
		}
		c.JSON(http.StatusBadRequest, gin.H{"error": "Configuration validation failed", "details": errorMessages})
		return
	}
	// Apply configuration using UIProvider
	uiRegistry := tasks.GetGlobalUIRegistry()
	typesRegistry := tasks.GetGlobalTypesRegistry()
	var provider types.TaskUIProvider
	for workerTaskType := range typesRegistry.GetAllDetectors() {
		if string(workerTaskType) == string(taskType) {
			provider = uiRegistry.GetProvider(workerTaskType)
			break
		}
	}
	if provider == nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "UI provider not found for task type"})
		return
	}
	// Apply configuration using provider
	err = provider.ApplyTaskConfig(config)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()})
		return
	}
	// Save task configuration to protobuf file using ConfigPersistence
	if h.adminServer != nil && h.adminServer.GetConfigPersistence() != nil {
		err = h.saveTaskConfigToProtobuf(taskType, config)
		if err != nil {
			glog.Warningf("Failed to save task config to protobuf file: %v", err)
			// Don't fail the request, just log the warning
		}
	} else if h.adminServer == nil {
		glog.Warningf("Failed to save task config: admin server not initialized")
	}
	// Trigger a configuration reload in the maintenance manager
	if h.adminServer != nil {
		if manager := h.adminServer.GetMaintenanceManager(); manager != nil {
			err = manager.ReloadTaskConfigurations()
			if err != nil {
				glog.Warningf("Failed to reload task configurations: %v", err)
			} else {
				glog.V(1).Infof("Successfully reloaded task configurations after updating %s", taskTypeName)
			}
		}
	}
	// Redirect back to task configuration page
	c.Redirect(http.StatusSeeOther, "/maintenance/config/"+taskTypeName)
}
// parseTaskConfigFromForm parses form data using schema definitions.
//
// It walks config's struct fields via reflection, matches each field's json
// tag against the schema, and delegates per-field parsing to
// parseFieldFromForm. Embedded structs are descended into recursively; fields
// without a json tag or without a matching schema entry are skipped.
// config must be a struct or pointer to struct.
func (h *MaintenanceHandlers) parseTaskConfigFromForm(formData map[string][]string, schema *tasks.TaskConfigSchema, config interface{}) error {
	configValue := reflect.ValueOf(config)
	if configValue.Kind() == reflect.Ptr {
		configValue = configValue.Elem()
	}
	if configValue.Kind() != reflect.Struct {
		return fmt.Errorf("config must be a struct or pointer to struct")
	}
	configType := configValue.Type()
	for i := 0; i < configValue.NumField(); i++ {
		field := configValue.Field(i)
		fieldType := configType.Field(i)
		// Handle embedded structs recursively
		if fieldType.Anonymous && field.Kind() == reflect.Struct {
			err := h.parseTaskConfigFromForm(formData, schema, field.Addr().Interface())
			if err != nil {
				return fmt.Errorf("error parsing embedded struct %s: %w", fieldType.Name, err)
			}
			continue
		}
		// Get JSON tag name
		jsonTag := fieldType.Tag.Get("json")
		if jsonTag == "" {
			continue
		}
		// Remove options like ",omitempty"
		if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 {
			jsonTag = jsonTag[:commaIdx]
		}
		// Find corresponding schema field
		schemaField := schema.GetFieldByName(jsonTag)
		if schemaField == nil {
			continue
		}
		// Parse value based on field type
		if err := h.parseFieldFromForm(formData, schemaField, field); err != nil {
			return fmt.Errorf("error parsing field %s: %w", schemaField.DisplayName, err)
		}
	}
	return nil
}
// parseFieldFromForm parses a single field value from form data.
//
// The schema field's declared type decides how the raw form values are
// interpreted: bool maps checkbox presence, int/float/string parse the first
// submitted value, and interval combines "<name>_value" and "<name>_unit"
// inputs into seconds. Unsettable (unexported) fields are silently skipped;
// an unknown schema field type is an error.
func (h *MaintenanceHandlers) parseFieldFromForm(formData map[string][]string, schemaField *config.Field, fieldValue reflect.Value) error {
	if !fieldValue.CanSet() {
		return nil
	}
	switch schemaField.Type {
	case config.FieldTypeBool:
		// Checkbox fields - present means true, absent means false
		_, exists := formData[schemaField.JSONName]
		fieldValue.SetBool(exists)
	case config.FieldTypeInt:
		if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 {
			if intVal, err := strconv.Atoi(values[0]); err != nil {
				return fmt.Errorf("invalid integer value: %s", values[0])
			} else {
				fieldValue.SetInt(int64(intVal))
			}
		}
	case config.FieldTypeFloat:
		if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 {
			if floatVal, err := strconv.ParseFloat(values[0], 64); err != nil {
				return fmt.Errorf("invalid float value: %s", values[0])
			} else {
				fieldValue.SetFloat(floatVal)
			}
		}
	case config.FieldTypeString:
		if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 {
			fieldValue.SetString(values[0])
		}
	case config.FieldTypeInterval:
		// Parse interval fields with value + unit
		valueKey := schemaField.JSONName + "_value"
		unitKey := schemaField.JSONName + "_unit"
		if valueStrs, ok := formData[valueKey]; ok && len(valueStrs) > 0 {
			value, err := strconv.Atoi(valueStrs[0])
			if err != nil {
				return fmt.Errorf("invalid interval value: %s", valueStrs[0])
			}
			unit := "minutes" // default
			if unitStrs, ok := formData[unitKey]; ok && len(unitStrs) > 0 {
				unit = unitStrs[0]
			}
			// Convert to seconds
			seconds := config.IntervalValueUnitToSeconds(value, unit)
			fieldValue.SetInt(int64(seconds))
		}
	default:
		return fmt.Errorf("unsupported field type: %s", schemaField.Type)
	}
	return nil
}
// UpdateMaintenanceConfig updates maintenance configuration from form,
// binding the posted values and redirecting back to the config page on
// success.
func (h *MaintenanceHandlers) UpdateMaintenanceConfig(c *gin.Context) {
	var cfg maintenance.MaintenanceConfig
	if bindErr := c.ShouldBind(&cfg); bindErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()})
		return
	}
	if updateErr := h.updateMaintenanceConfig(&cfg); updateErr != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": updateErr.Error()})
		return
	}
	c.Redirect(http.StatusSeeOther, "/maintenance/config")
}
// Helper methods that delegate to AdminServer

// getMaintenanceQueueData fetches the maintenance queue view model from the
// admin server, or reports an error when the handler is not wired up yet.
func (h *MaintenanceHandlers) getMaintenanceQueueData() (*maintenance.MaintenanceQueueData, error) {
	if h.adminServer != nil {
		// Reuse the exported method from AdminServer used by the JSON API.
		return h.adminServer.GetMaintenanceQueueData()
	}
	return nil, fmt.Errorf("admin server not initialized")
}
// getMaintenanceConfig fetches the maintenance configuration view model from
// the admin server, or reports an error when the handler is not wired up yet.
func (h *MaintenanceHandlers) getMaintenanceConfig() (*maintenance.MaintenanceConfigData, error) {
	if h.adminServer != nil {
		// Delegate to AdminServer's real persistence method.
		return h.adminServer.GetMaintenanceConfigData()
	}
	return nil, fmt.Errorf("admin server not initialized")
}
// updateMaintenanceConfig persists an updated maintenance configuration via
// the admin server, or reports an error when the handler is not wired up yet.
func (h *MaintenanceHandlers) updateMaintenanceConfig(config *maintenance.MaintenanceConfig) error {
	if h.adminServer != nil {
		// Delegate to AdminServer's real persistence method.
		return h.adminServer.UpdateMaintenanceConfigData(config)
	}
	return fmt.Errorf("admin server not initialized")
}
// saveTaskConfigToProtobuf saves task configuration to a protobuf file via the
// admin server's config persistence layer. Returns an error for task types
// that have no protobuf persistence method.
func (h *MaintenanceHandlers) saveTaskConfigToProtobuf(taskType types.TaskType, config TaskConfig) error {
	// Guard against an uninitialized handler, consistent with the other
	// helpers in this file (previously this would panic on a nil adminServer).
	if h.adminServer == nil {
		return fmt.Errorf("admin server not initialized")
	}
	configPersistence := h.adminServer.GetConfigPersistence()
	if configPersistence == nil {
		return fmt.Errorf("config persistence not available")
	}
	// Convert the UI-facing config into its protobuf policy representation.
	taskPolicy := config.ToTaskPolicy()
	// Save using task-specific methods
	switch taskType {
	case types.TaskTypeVacuum:
		return configPersistence.SaveVacuumTaskPolicy(taskPolicy)
	case types.TaskTypeErasureCoding:
		return configPersistence.SaveErasureCodingTaskPolicy(taskPolicy)
	case types.TaskTypeBalance:
		return configPersistence.SaveBalanceTaskPolicy(taskPolicy)
	default:
		return fmt.Errorf("unsupported task type for protobuf persistence: %s", taskType)
	}
}

View File

@@ -1,389 +0,0 @@
package handlers
import (
"net/url"
"testing"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
)
// TestParseTaskConfigFromForm_WithEmbeddedStruct verifies that URL form values
// are parsed into each task config struct, including the fields of the
// embedded BaseConfig. Locals are named cfg (not "config") so the imported
// config package is not shadowed.
func TestParseTaskConfigFromForm_WithEmbeddedStruct(t *testing.T) {
	// Create a maintenance handlers instance for testing
	h := &MaintenanceHandlers{}

	// Test with balance config
	t.Run("Balance Config", func(t *testing.T) {
		// Simulate form data
		formData := url.Values{
			"enabled":                     {"on"},      // checkbox field
			"scan_interval_seconds_value": {"30"},      // interval field
			"scan_interval_seconds_unit":  {"minutes"}, // interval unit
			"max_concurrent":              {"2"},       // number field
			"imbalance_threshold":         {"0.15"},    // float field
			"min_server_count":            {"3"},       // number field
		}
		schema := tasks.GetTaskConfigSchema("balance")
		if schema == nil {
			t.Fatal("Failed to get balance schema")
		}
		cfg := &balance.Config{}
		if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
			t.Fatalf("Failed to parse form data: %v", err)
		}
		// Verify embedded struct fields were set correctly
		if !cfg.Enabled {
			t.Errorf("Expected Enabled=true, got %v", cfg.Enabled)
		}
		if cfg.ScanIntervalSeconds != 1800 { // 30 minutes * 60
			t.Errorf("Expected ScanIntervalSeconds=1800, got %v", cfg.ScanIntervalSeconds)
		}
		if cfg.MaxConcurrent != 2 {
			t.Errorf("Expected MaxConcurrent=2, got %v", cfg.MaxConcurrent)
		}
		// Verify balance-specific fields were set correctly
		if cfg.ImbalanceThreshold != 0.15 {
			t.Errorf("Expected ImbalanceThreshold=0.15, got %v", cfg.ImbalanceThreshold)
		}
		if cfg.MinServerCount != 3 {
			t.Errorf("Expected MinServerCount=3, got %v", cfg.MinServerCount)
		}
	})

	// Test with vacuum config
	t.Run("Vacuum Config", func(t *testing.T) {
		// Simulate form data
		formData := url.Values{
			// "enabled" field omitted to simulate unchecked checkbox
			"scan_interval_seconds_value":  {"4"},     // interval field
			"scan_interval_seconds_unit":   {"hours"}, // interval unit
			"max_concurrent":               {"3"},     // number field
			"garbage_threshold":            {"0.4"},   // float field
			"min_volume_age_seconds_value": {"2"},     // interval field
			"min_volume_age_seconds_unit":  {"days"},  // interval unit
			"min_interval_seconds_value":   {"1"},     // interval field
			"min_interval_seconds_unit":    {"days"},  // interval unit
		}
		schema := tasks.GetTaskConfigSchema("vacuum")
		if schema == nil {
			t.Fatal("Failed to get vacuum schema")
		}
		cfg := &vacuum.Config{}
		if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
			t.Fatalf("Failed to parse form data: %v", err)
		}
		// Verify embedded struct fields were set correctly
		if cfg.Enabled {
			t.Errorf("Expected Enabled=false, got %v", cfg.Enabled)
		}
		if cfg.ScanIntervalSeconds != 14400 { // 4 hours * 3600
			t.Errorf("Expected ScanIntervalSeconds=14400, got %v", cfg.ScanIntervalSeconds)
		}
		if cfg.MaxConcurrent != 3 {
			t.Errorf("Expected MaxConcurrent=3, got %v", cfg.MaxConcurrent)
		}
		// Verify vacuum-specific fields were set correctly
		if cfg.GarbageThreshold != 0.4 {
			t.Errorf("Expected GarbageThreshold=0.4, got %v", cfg.GarbageThreshold)
		}
		if cfg.MinVolumeAgeSeconds != 172800 { // 2 days * 86400
			t.Errorf("Expected MinVolumeAgeSeconds=172800, got %v", cfg.MinVolumeAgeSeconds)
		}
		if cfg.MinIntervalSeconds != 86400 { // 1 day * 86400
			t.Errorf("Expected MinIntervalSeconds=86400, got %v", cfg.MinIntervalSeconds)
		}
	})

	// Test with erasure coding config
	t.Run("Erasure Coding Config", func(t *testing.T) {
		// Simulate form data
		formData := url.Values{
			"enabled":                     {"on"},                // checkbox field
			"scan_interval_seconds_value": {"2"},                 // interval field
			"scan_interval_seconds_unit":  {"hours"},             // interval unit
			"max_concurrent":              {"1"},                 // number field
			"quiet_for_seconds_value":     {"10"},                // interval field
			"quiet_for_seconds_unit":      {"minutes"},           // interval unit
			"fullness_ratio":              {"0.85"},              // float field
			"collection_filter":           {"test_collection"},   // string field
			"min_size_mb":                 {"50"},                // number field
		}
		schema := tasks.GetTaskConfigSchema("erasure_coding")
		if schema == nil {
			t.Fatal("Failed to get erasure_coding schema")
		}
		cfg := &erasure_coding.Config{}
		if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
			t.Fatalf("Failed to parse form data: %v", err)
		}
		// Verify embedded struct fields were set correctly
		if !cfg.Enabled {
			t.Errorf("Expected Enabled=true, got %v", cfg.Enabled)
		}
		if cfg.ScanIntervalSeconds != 7200 { // 2 hours * 3600
			t.Errorf("Expected ScanIntervalSeconds=7200, got %v", cfg.ScanIntervalSeconds)
		}
		if cfg.MaxConcurrent != 1 {
			t.Errorf("Expected MaxConcurrent=1, got %v", cfg.MaxConcurrent)
		}
		// Verify erasure coding-specific fields were set correctly
		if cfg.QuietForSeconds != 600 { // 10 minutes * 60
			t.Errorf("Expected QuietForSeconds=600, got %v", cfg.QuietForSeconds)
		}
		if cfg.FullnessRatio != 0.85 {
			t.Errorf("Expected FullnessRatio=0.85, got %v", cfg.FullnessRatio)
		}
		if cfg.CollectionFilter != "test_collection" {
			t.Errorf("Expected CollectionFilter='test_collection', got %v", cfg.CollectionFilter)
		}
		if cfg.MinSizeMB != 50 {
			t.Errorf("Expected MinSizeMB=50, got %v", cfg.MinSizeMB)
		}
	})
}
// TestConfigurationValidation verifies that each task config struct converts
// to a protobuf TaskPolicy with matching fields and passes its own Validate.
func TestConfigurationValidation(t *testing.T) {
	// Test that config structs can be validated and converted to protobuf format
	taskTypes := []struct {
		name   string
		config interface{}
	}{
		{
			"balance",
			&balance.Config{
				BaseConfig: base.BaseConfig{
					Enabled:             true,
					ScanIntervalSeconds: 2400,
					MaxConcurrent:       3,
				},
				ImbalanceThreshold: 0.18,
				MinServerCount:     4,
			},
		},
		{
			"vacuum",
			&vacuum.Config{
				BaseConfig: base.BaseConfig{
					Enabled:             false,
					ScanIntervalSeconds: 7200,
					MaxConcurrent:       2,
				},
				GarbageThreshold:    0.35,
				MinVolumeAgeSeconds: 86400,
				MinIntervalSeconds:  604800,
			},
		},
		{
			"erasure_coding",
			&erasure_coding.Config{
				BaseConfig: base.BaseConfig{
					Enabled:             true,
					ScanIntervalSeconds: 3600,
					MaxConcurrent:       1,
				},
				QuietForSeconds:  900,
				FullnessRatio:    0.9,
				CollectionFilter: "important",
				MinSizeMB:        100,
			},
		},
	}
	for _, test := range taskTypes {
		t.Run(test.name, func(t *testing.T) {
			// A single type switch covers both the ToTaskPolicy conversion
			// checks and Validate, instead of duplicating the switch (the
			// second copy also silently skipped unknown types).
			switch cfg := test.config.(type) {
			case *balance.Config:
				policy := cfg.ToTaskPolicy()
				if policy == nil {
					t.Fatal("ToTaskPolicy returned nil")
				}
				if policy.Enabled != cfg.Enabled {
					t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
				}
				if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
					t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
				}
				if err := cfg.Validate(); err != nil {
					t.Errorf("Validation failed: %v", err)
				}
			case *vacuum.Config:
				policy := cfg.ToTaskPolicy()
				if policy == nil {
					t.Fatal("ToTaskPolicy returned nil")
				}
				if policy.Enabled != cfg.Enabled {
					t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
				}
				if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
					t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
				}
				if err := cfg.Validate(); err != nil {
					t.Errorf("Validation failed: %v", err)
				}
			case *erasure_coding.Config:
				policy := cfg.ToTaskPolicy()
				if policy == nil {
					t.Fatal("ToTaskPolicy returned nil")
				}
				if policy.Enabled != cfg.Enabled {
					t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
				}
				if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
					t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
				}
				if err := cfg.Validate(); err != nil {
					t.Errorf("Validation failed: %v", err)
				}
			default:
				t.Fatalf("Unknown config type: %T", test.config)
			}
		})
	}
}
// TestParseFieldFromForm_EdgeCases covers checkbox presence semantics
// (present means true, even with an empty value) and interval value/unit
// parsing. The parsed struct local is named cfg because the imported config
// package is referenced in the same scope for schema construction; the
// original shadowed it.
func TestParseFieldFromForm_EdgeCases(t *testing.T) {
	h := &MaintenanceHandlers{}

	// Test checkbox parsing (boolean fields)
	t.Run("Checkbox Fields", func(t *testing.T) {
		tests := []struct {
			name          string
			formData      url.Values
			expectedValue bool
		}{
			{"Checked checkbox", url.Values{"test_field": {"on"}}, true},
			{"Unchecked checkbox", url.Values{}, false},
			{"Empty value checkbox", url.Values{"test_field": {""}}, true}, // Present but empty means checked
		}
		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				schema := &tasks.TaskConfigSchema{
					Schema: config.Schema{
						Fields: []*config.Field{
							{
								JSONName:  "test_field",
								Type:      config.FieldTypeBool,
								InputType: "checkbox",
							},
						},
					},
				}
				type TestConfig struct {
					TestField bool `json:"test_field"`
				}
				cfg := &TestConfig{}
				if err := h.parseTaskConfigFromForm(test.formData, schema, cfg); err != nil {
					t.Fatalf("parseTaskConfigFromForm failed: %v", err)
				}
				if cfg.TestField != test.expectedValue {
					t.Errorf("Expected %v, got %v", test.expectedValue, cfg.TestField)
				}
			})
		}
	})

	// Test interval parsing
	t.Run("Interval Fields", func(t *testing.T) {
		tests := []struct {
			name         string
			value        string
			unit         string
			expectedSecs int
		}{
			{"Minutes", "30", "minutes", 1800},
			{"Hours", "2", "hours", 7200},
			{"Days", "1", "days", 86400},
		}
		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				formData := url.Values{
					"test_field_value": {test.value},
					"test_field_unit":  {test.unit},
				}
				schema := &tasks.TaskConfigSchema{
					Schema: config.Schema{
						Fields: []*config.Field{
							{
								JSONName:  "test_field",
								Type:      config.FieldTypeInterval,
								InputType: "interval",
							},
						},
					},
				}
				type TestConfig struct {
					TestField int `json:"test_field"`
				}
				cfg := &TestConfig{}
				if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
					t.Fatalf("parseTaskConfigFromForm failed: %v", err)
				}
				if cfg.TestField != test.expectedSecs {
					t.Errorf("Expected %d seconds, got %d", test.expectedSecs, cfg.TestField)
				}
			})
		}
	})
}

View File

@@ -0,0 +1,67 @@
package handlers
import (
"bytes"
"net/http"
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
)
// PluginHandlers handles plugin UI pages.
type PluginHandlers struct {
	// adminServer is the admin server this handler set is bound to.
	adminServer *dash.AdminServer
}
// NewPluginHandlers creates a new instance of PluginHandlers bound to the
// given admin server.
func NewPluginHandlers(adminServer *dash.AdminServer) *PluginHandlers {
	return &PluginHandlers{adminServer: adminServer}
}
// ShowPlugin displays the plugin overview page.
func (h *PluginHandlers) ShowPlugin(c *gin.Context) {
	h.renderPluginPage(c, "overview")
}

// ShowPluginConfiguration displays the plugin configuration page.
func (h *PluginHandlers) ShowPluginConfiguration(c *gin.Context) {
	h.renderPluginPage(c, "configuration")
}

// ShowPluginDetection displays the plugin detection jobs page.
func (h *PluginHandlers) ShowPluginDetection(c *gin.Context) {
	h.renderPluginPage(c, "detection")
}

// ShowPluginQueue displays the plugin job queue page.
func (h *PluginHandlers) ShowPluginQueue(c *gin.Context) {
	h.renderPluginPage(c, "queue")
}

// ShowPluginExecution displays the plugin execution jobs page.
func (h *PluginHandlers) ShowPluginExecution(c *gin.Context) {
	h.renderPluginPage(c, "execution")
}

// ShowPluginMonitoring displays the plugin monitoring page.
func (h *PluginHandlers) ShowPluginMonitoring(c *gin.Context) {
	// Backward-compatible alias for the old monitoring URL; it now renders
	// the detection page.
	h.renderPluginPage(c, "detection")
}
// renderPluginPage renders the shared plugin template for the given sub-page
// and writes the result as HTML. Rendering happens into a buffer first so a
// template failure can still produce a clean JSON error response.
func (h *PluginHandlers) renderPluginPage(c *gin.Context, page string) {
	pageComponent := app.Plugin(page)
	wrapped := layout.Layout(c, pageComponent)
	var rendered bytes.Buffer
	err := wrapped.Render(c.Request.Context(), &rendered)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
	c.Data(http.StatusOK, "text/html; charset=utf-8", rendered.Bytes())
}

205
weed/admin/plugin/DESIGN.md Normal file
View File

@@ -0,0 +1,205 @@
# Admin Worker Plugin System (Design)
This document describes the plugin system for admin-managed workers, implemented in parallel with the current maintenance/worker mechanism.
## Scope
- Add a new plugin protocol and runtime model for multi-language workers.
- Keep all current admin + worker code paths untouched.
- Use gRPC for all admin-worker communication.
- Let workers describe job configuration UI declaratively via protobuf.
- Persist all job type configuration under admin server data directory.
- Support detector workers and executor workers per job type.
- Add end-to-end workflow observability (activities, active jobs, progress).
## New Contract
- Proto file: `weed/pb/plugin.proto`
- gRPC service: `PluginControlService.WorkerStream`
- Connection model: worker-initiated long-lived bidirectional stream.
Why this model:
- Works for workers in any language with gRPC support.
- Avoids admin dialing constraints in NAT/private networks.
- Allows command/response, progress streaming, and heartbeat over one channel.
## Core Runtime Components (Admin Side)
1. `PluginRegistry`
- Tracks connected workers and their per-job-type capabilities.
- Maintains liveness via heartbeat timeout.
2. `SchemaCoordinator`
- For each job type, asks one capable worker for `JobTypeDescriptor`.
- Caches descriptor version and refresh timestamp.
3. `ConfigStore`
- Persists descriptor + saved config values in `dataDir`.
- Stores both:
- Admin-owned runtime config (detection interval, dispatch concurrency, retry).
- Worker-owned config values (plugin-specific detection/execution knobs).
4. `DetectorScheduler`
- Per job type, chooses one detector worker (`can_detect=true`).
- Sends `RunDetectionRequest` with saved configs + cluster context.
- Accepts `DetectionProposals`, dedupes by `dedupe_key`, inserts jobs.
5. `JobDispatcher`
- Chooses executor worker (`can_execute=true`) for each pending job.
- Sends `ExecuteJobRequest`.
- Consumes `JobProgressUpdate` and `JobCompleted`.
6. `WorkflowMonitor`
- Builds live counters and timeline from events:
- activities per job type,
- active jobs,
- per-job progress/state,
- worker health/load.
## Worker Responsibilities
1. Register capabilities on connect (`WorkerHello`).
2. Expose job type descriptor (`ConfigSchemaResponse`) including UI schemas:
- admin config form,
- worker config form,
- defaults.
3. Run detection on demand (`RunDetectionRequest`) and return proposals.
4. Execute assigned jobs (`ExecuteJobRequest`) and stream progress.
5. Heartbeat regularly with slot usage and running work.
6. Handle cancellation requests (`CancelRequest`) for in-flight detection/execution.
## Declarative UI Model
UI is fully derived from protobuf schema:
- `ConfigForm`
- `ConfigSection`
- `ConfigField`
- `ConfigOption`
- `ValidationRule`
- `ConfigValue` (typed scalar/list/map/object value container)
Result:
- Admin can render forms without hardcoded task structs.
- New job types can ship UI schema from worker binary alone.
- Worker language is irrelevant as long as it can emit protobuf messages.
## Detection and Dispatch Flow
1. Worker connects and registers capabilities.
2. Admin requests descriptor per job type.
3. Admin persists descriptor and editable config values.
4. On detection interval (admin-owned setting):
- Admin chooses one detector worker for that job type.
- Sends `RunDetectionRequest` with:
- `AdminRuntimeConfig`,
- `admin_config_values`,
- `worker_config_values`,
- `ClusterContext` (master/filer/volume grpc locations, metadata).
5. Detector emits `DetectionProposals` and `DetectionComplete`.
6. Admin dedupes and enqueues jobs.
7. Dispatcher assigns jobs to any eligible executor worker.
8. Executor emits `JobProgressUpdate` and `JobCompleted`.
9. Monitor updates workflow UI in near-real-time.
## Persistence Layout (Admin Data Dir)
Current layout under `<admin-data-dir>/plugin/`:
- `job_types/<job_type>/descriptor.pb`
- `job_types/<job_type>/descriptor.json`
- `job_types/<job_type>/config.pb`
- `job_types/<job_type>/config.json`
- `job_types/<job_type>/runs.json`
- `jobs/tracked_jobs.json`
- `activities/activities.json`
`config.pb` must be encoded as the `PersistedJobTypeConfig` message defined in `plugin.proto`.
## Admin UI
- Route: `/plugin`
- Includes:
- runtime status,
- workers/capabilities,
- declarative descriptor-driven config forms,
- run history (last 10 success + last 10 errors),
- tracked jobs and activity stream,
- manual actions for schema refresh, detection, and detect+execute workflow.
## Scheduling Policy (Initial)
Detector selection per job type:
- only workers with `can_detect=true`.
- prefer healthy worker with highest free detection slots.
- lease ends when heartbeat timeout or stream drop.
Execution dispatch:
- only workers with `can_execute=true`.
- select by available execution slots and least active jobs.
- retry on failure using admin runtime retry config.
## Safety and Reliability
- Idempotency: dedupe proposals by (`job_type`, `dedupe_key`).
- Backpressure: enforce max jobs per detection run.
- Timeouts: detection and execution timeout from admin runtime config.
- Replay-safe persistence: write job state changes before emitting UI events.
- Heartbeat-based failover for detector/executor reassignment.
## Backward Compatibility
- Legacy `worker.proto` runtime remains internally available where still referenced.
- External CLI worker path is moved to plugin runtime behavior.
- Runtime is enabled by default on admin worker gRPC server.
## Incremental Rollout Plan
Phase 1
- Introduce protocol and storage models only.
Phase 2
- Build admin registry/scheduler/dispatcher behind feature flag.
Phase 3
- Add dedicated plugin UI pages and metrics.
Phase 4
- Port one existing job type (e.g. vacuum) as external worker plugin.
Phase 4 status (initial implementation)
- Added `weed worker` command as an external `plugin.proto` worker process.
- Initial handler implements `vacuum` job type with:
- declarative descriptor/config form response (`ConfigSchemaResponse`),
- detection via master topology scan (`RunDetectionRequest`),
- execution via existing vacuum task logic (`ExecuteJobRequest`),
- heartbeat/load reporting for monitor UI.
- Legacy maintenance-worker-specific CLI path is removed.
Run example:
- Start admin: `weed admin -master=localhost:9333`
- Start worker: `weed worker -admin=localhost:23646`
- Optional explicit job type: `weed worker -admin=localhost:23646 -jobType=vacuum`
- Optional stable worker ID persistence: `weed worker -admin=localhost:23646 -workingDir=/var/lib/seaweedfs-plugin`
Phase 5
- Migrate remaining job types and deprecate old mechanism.
## Agreed Defaults
1. Detector multiplicity
- Exactly one detector worker per job type at a time. Admin selects one worker and runs detection there.
2. Secret handling
- No encryption at rest required for plugin config in this phase.
3. Schema compatibility
- No migration policy required yet; this is a new system.
4. Execution ownership
- Same worker is allowed to do both detection and execution.
5. Retention
- Keep last 10 successful runs and last 10 error runs per job type.

View File

@@ -0,0 +1,739 @@
package plugin
import (
"encoding/json"
"fmt"
"net/url"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
// File-system layout constants for the plugin config store, all relative to
// <admin-data-dir>/plugin/.
const (
	pluginDirName           = "plugin"
	jobTypesDirName         = "job_types"
	jobsDirName             = "jobs"
	jobDetailsDirName       = "job_details"
	activitiesDirName       = "activities"
	descriptorPBFileName    = "descriptor.pb"
	descriptorJSONFileName  = "descriptor.json"
	configPBFileName        = "config.pb"
	configJSONFileName      = "config.json"
	runsJSONFileName        = "runs.json"
	trackedJobsJSONFileName = "tracked_jobs.json"
	activitiesJSONFileName  = "activities.json"
	defaultDirPerm          = 0o755 // permissions for directories the store creates
	defaultFilePerm         = 0o644 // permissions for files the store writes
)

// validJobTypePattern is the canonical pattern for safe job type names.
// Only letters, digits, underscore, dash, and dot are allowed, which prevents
// path traversal because '/', '\\', and whitespace are rejected.
var validJobTypePattern = regexp.MustCompile(`^[A-Za-z0-9_.-]+$`)
// ConfigStore persists plugin configuration and bounded run history.
// If admin data dir is empty, it transparently falls back to in-memory mode.
type ConfigStore struct {
	configured bool   // true when a data dir was provided (file-backed mode)
	baseDir    string // <admin-data-dir>/plugin; empty in in-memory mode
	// mu guards the mem* fields; file-backed reads/writes also take it so
	// concurrent callers see consistent state.
	mu sync.RWMutex
	// In-memory fallbacks, used only when configured is false.
	memDescriptors map[string]*plugin_pb.JobTypeDescriptor
	memConfigs     map[string]*plugin_pb.PersistedJobTypeConfig
	memRunHistory  map[string]*JobTypeRunHistory
	memTrackedJobs []TrackedJob
	memActivities  []JobActivity
	memJobDetails  map[string]TrackedJob
}
// NewConfigStore creates a ConfigStore rooted at <adminDataDir>/plugin.
// An empty adminDataDir selects in-memory mode, where nothing touches disk.
// In file-backed mode every directory the store writes into is created up
// front so later save operations only need to write files.
func NewConfigStore(adminDataDir string) (*ConfigStore, error) {
	store := &ConfigStore{
		configured:     adminDataDir != "",
		memDescriptors: make(map[string]*plugin_pb.JobTypeDescriptor),
		memConfigs:     make(map[string]*plugin_pb.PersistedJobTypeConfig),
		memRunHistory:  make(map[string]*JobTypeRunHistory),
		memJobDetails:  make(map[string]TrackedJob),
	}
	if adminDataDir == "" {
		return store, nil
	}
	store.baseDir = filepath.Join(adminDataDir, pluginDirName)
	// One loop replaces the copy-pasted MkdirAll stanzas; creating the
	// job_details path also implicitly creates the jobs directory.
	for _, dir := range []string{
		filepath.Join(store.baseDir, jobTypesDirName),
		filepath.Join(store.baseDir, jobsDirName, jobDetailsDirName),
		filepath.Join(store.baseDir, activitiesDirName),
	} {
		if err := os.MkdirAll(dir, defaultDirPerm); err != nil {
			return nil, fmt.Errorf("create plugin dir %s: %w", dir, err)
		}
	}
	return store, nil
}
// IsConfigured reports whether the store is file-backed (an admin data dir
// was supplied at construction time).
func (s *ConfigStore) IsConfigured() bool {
	return s.configured
}

// BaseDir returns the plugin base directory; empty in in-memory mode.
func (s *ConfigStore) BaseDir() string {
	return s.baseDir
}
// SaveDescriptor stores the descriptor for jobType, in memory or on disk
// depending on the store mode. The input message is cloned so later caller
// mutations cannot affect persisted state.
func (s *ConfigStore) SaveDescriptor(jobType string, descriptor *plugin_pb.JobTypeDescriptor) error {
	if descriptor == nil {
		return fmt.Errorf("descriptor is nil")
	}
	if _, err := sanitizeJobType(jobType); err != nil {
		return err
	}
	stored := proto.Clone(descriptor).(*plugin_pb.JobTypeDescriptor)
	if stored.JobType == "" {
		// Backfill the job type so the persisted message is self-describing.
		stored.JobType = jobType
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.configured {
		s.memDescriptors[jobType] = stored
		return nil
	}
	dir, err := s.ensureJobTypeDir(jobType)
	if err != nil {
		return err
	}
	writeErr := writeProtoFiles(
		stored,
		filepath.Join(dir, descriptorPBFileName),
		filepath.Join(dir, descriptorJSONFileName),
	)
	if writeErr != nil {
		return fmt.Errorf("save descriptor for %s: %w", jobType, writeErr)
	}
	return nil
}
// LoadDescriptor returns the stored descriptor for jobType, or (nil, nil)
// when no descriptor has been saved yet.
func (s *ConfigStore) LoadDescriptor(jobType string) (*plugin_pb.JobTypeDescriptor, error) {
	if _, err := sanitizeJobType(jobType); err != nil {
		return nil, err
	}
	s.mu.RLock()
	if !s.configured {
		// In-memory mode: return a clone so callers cannot mutate the cache.
		d := s.memDescriptors[jobType]
		s.mu.RUnlock()
		if d == nil {
			return nil, nil
		}
		return proto.Clone(d).(*plugin_pb.JobTypeDescriptor), nil
	}
	// File-backed mode: release the lock before disk I/O. baseDir is
	// immutable after construction, so the read below does not need it.
	s.mu.RUnlock()
	pbPath := filepath.Join(s.baseDir, jobTypesDirName, jobType, descriptorPBFileName)
	data, err := os.ReadFile(pbPath)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing file means "not saved yet", not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("read descriptor for %s: %w", jobType, err)
	}
	var descriptor plugin_pb.JobTypeDescriptor
	if err := proto.Unmarshal(data, &descriptor); err != nil {
		return nil, fmt.Errorf("unmarshal descriptor for %s: %w", jobType, err)
	}
	return &descriptor, nil
}
// SaveJobTypeConfig persists the given job type configuration, keyed by its
// JobType field. The caller's message is never mutated: sanitization is
// applied to an internal clone (the previous version wrote the sanitized job
// type back into the caller's message before cloning).
func (s *ConfigStore) SaveJobTypeConfig(config *plugin_pb.PersistedJobTypeConfig) error {
	if config == nil {
		return fmt.Errorf("job type config is nil")
	}
	if config.JobType == "" {
		return fmt.Errorf("job type config has empty job_type")
	}
	sanitizedJobType, err := sanitizeJobType(config.JobType)
	if err != nil {
		return err
	}
	// Clone first, then stamp the sanitized (filesystem-safe) job type onto
	// the clone only.
	clone := proto.Clone(config).(*plugin_pb.PersistedJobTypeConfig)
	clone.JobType = sanitizedJobType
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.configured {
		s.memConfigs[clone.JobType] = clone
		return nil
	}
	jobTypeDir, err := s.ensureJobTypeDir(clone.JobType)
	if err != nil {
		return err
	}
	pbPath := filepath.Join(jobTypeDir, configPBFileName)
	jsonPath := filepath.Join(jobTypeDir, configJSONFileName)
	if err := writeProtoFiles(clone, pbPath, jsonPath); err != nil {
		return fmt.Errorf("save job type config for %s: %w", clone.JobType, err)
	}
	return nil
}
// LoadJobTypeConfig returns the stored configuration for jobType, or
// (nil, nil) when none has been saved yet.
func (s *ConfigStore) LoadJobTypeConfig(jobType string) (*plugin_pb.PersistedJobTypeConfig, error) {
	if _, err := sanitizeJobType(jobType); err != nil {
		return nil, err
	}
	s.mu.RLock()
	if !s.configured {
		// In-memory mode: return a clone so callers cannot mutate the cache.
		cfg := s.memConfigs[jobType]
		s.mu.RUnlock()
		if cfg == nil {
			return nil, nil
		}
		return proto.Clone(cfg).(*plugin_pb.PersistedJobTypeConfig), nil
	}
	// File-backed mode: drop the lock before disk I/O; baseDir is immutable.
	s.mu.RUnlock()
	pbPath := filepath.Join(s.baseDir, jobTypesDirName, jobType, configPBFileName)
	data, err := os.ReadFile(pbPath)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing file means "not saved yet", not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("read job type config for %s: %w", jobType, err)
	}
	var config plugin_pb.PersistedJobTypeConfig
	if err := proto.Unmarshal(data, &config); err != nil {
		return nil, fmt.Errorf("unmarshal job type config for %s: %w", jobType, err)
	}
	return &config, nil
}
// AppendRunRecord appends a completed run to the bounded history of jobType.
// Successful runs and error runs are kept in separate, independently trimmed
// lists; any outcome other than success is normalized to an error run.
func (s *ConfigStore) AppendRunRecord(jobType string, record *JobRunRecord) error {
	if record == nil {
		return fmt.Errorf("run record is nil")
	}
	if _, err := sanitizeJobType(jobType); err != nil {
		return err
	}
	// Work on a copy so the caller's record is never modified.
	entry := *record
	if entry.JobType == "" {
		entry.JobType = jobType
	}
	if entry.CompletedAt == nil || entry.CompletedAt.IsZero() {
		entry.CompletedAt = timeToPtr(time.Now().UTC())
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	history, err := s.loadRunHistoryLocked(jobType)
	if err != nil {
		return err
	}
	if entry.Outcome != RunOutcomeSuccess {
		// Normalize every non-success outcome to an explicit error run.
		entry.Outcome = RunOutcomeError
		history.ErrorRuns = append(history.ErrorRuns, entry)
	} else {
		history.SuccessfulRuns = append(history.SuccessfulRuns, entry)
	}
	history.SuccessfulRuns = trimRuns(history.SuccessfulRuns, MaxSuccessfulRunHistory)
	history.ErrorRuns = trimRuns(history.ErrorRuns, MaxErrorRunHistory)
	history.LastUpdatedTime = timeToPtr(time.Now().UTC())
	return s.saveRunHistoryLocked(jobType, history)
}
// LoadRunHistory returns a defensive copy of the run history for jobType.
func (s *ConfigStore) LoadRunHistory(jobType string) (*JobTypeRunHistory, error) {
	if _, err := sanitizeJobType(jobType); err != nil {
		return nil, err
	}
	// The write lock is required because in-memory mode may lazily create
	// the history entry during the load.
	s.mu.Lock()
	defer s.mu.Unlock()
	history, err := s.loadRunHistoryLocked(jobType)
	if err != nil {
		return nil, err
	}
	return cloneRunHistory(history), nil
}
// SaveTrackedJobs replaces the persisted tracked-job list with a snapshot of
// the given slice.
func (s *ConfigStore) SaveTrackedJobs(jobs []TrackedJob) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	snapshot := cloneTrackedJobs(jobs)
	if !s.configured {
		s.memTrackedJobs = snapshot
		return nil
	}
	payload, err := json.MarshalIndent(snapshot, "", " ")
	if err != nil {
		return fmt.Errorf("encode tracked jobs: %w", err)
	}
	target := filepath.Join(s.baseDir, jobsDirName, trackedJobsJSONFileName)
	if err := atomicWriteFile(target, payload, defaultFilePerm); err != nil {
		return fmt.Errorf("write tracked jobs: %w", err)
	}
	return nil
}
// LoadTrackedJobs returns a copy of the persisted tracked-job list, or nil
// when nothing has been saved yet.
func (s *ConfigStore) LoadTrackedJobs() ([]TrackedJob, error) {
	s.mu.RLock()
	if !s.configured {
		// In-memory mode: hand out a clone so callers cannot mutate state.
		out := cloneTrackedJobs(s.memTrackedJobs)
		s.mu.RUnlock()
		return out, nil
	}
	// File-backed mode: drop the lock before disk I/O; baseDir is immutable.
	s.mu.RUnlock()
	path := filepath.Join(s.baseDir, jobsDirName, trackedJobsJSONFileName)
	data, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing file means "nothing saved yet", not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("read tracked jobs: %w", err)
	}
	var jobs []TrackedJob
	if err := json.Unmarshal(data, &jobs); err != nil {
		return nil, fmt.Errorf("parse tracked jobs: %w", err)
	}
	return cloneTrackedJobs(jobs), nil
}
// SaveJobDetail persists the detailed record for a single tracked job, keyed
// by its sanitized job ID.
func (s *ConfigStore) SaveJobDetail(job TrackedJob) error {
	jobID, err := sanitizeJobID(job.JobID)
	if err != nil {
		return err
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	detail := cloneTrackedJob(job)
	// Store under the sanitized ID so the on-disk filename is always safe.
	detail.JobID = jobID
	if !s.configured {
		s.memJobDetails[jobID] = detail
		return nil
	}
	payload, err := json.MarshalIndent(detail, "", " ")
	if err != nil {
		return fmt.Errorf("encode job detail: %w", err)
	}
	target := filepath.Join(s.baseDir, jobsDirName, jobDetailsDirName, jobDetailFileName(jobID))
	if err := atomicWriteFile(target, payload, defaultFilePerm); err != nil {
		return fmt.Errorf("write job detail: %w", err)
	}
	return nil
}
// LoadJobDetail returns the detailed record for jobID, or (nil, nil) when no
// detail has been saved for it.
func (s *ConfigStore) LoadJobDetail(jobID string) (*TrackedJob, error) {
	jobID, err := sanitizeJobID(jobID)
	if err != nil {
		return nil, err
	}
	s.mu.RLock()
	if !s.configured {
		// In-memory mode: hand out a clone so callers cannot mutate state.
		job, ok := s.memJobDetails[jobID]
		s.mu.RUnlock()
		if !ok {
			return nil, nil
		}
		clone := cloneTrackedJob(job)
		return &clone, nil
	}
	// File-backed mode: drop the lock before disk I/O; baseDir is immutable.
	s.mu.RUnlock()
	path := filepath.Join(s.baseDir, jobsDirName, jobDetailsDirName, jobDetailFileName(jobID))
	data, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing file means "not saved", not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("read job detail: %w", err)
	}
	var job TrackedJob
	if err := json.Unmarshal(data, &job); err != nil {
		return nil, fmt.Errorf("parse job detail: %w", err)
	}
	clone := cloneTrackedJob(job)
	return &clone, nil
}
// SaveActivities replaces the persisted activity timeline with a snapshot of
// the given slice.
func (s *ConfigStore) SaveActivities(activities []JobActivity) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	snapshot := cloneActivities(activities)
	if !s.configured {
		s.memActivities = snapshot
		return nil
	}
	payload, err := json.MarshalIndent(snapshot, "", " ")
	if err != nil {
		return fmt.Errorf("encode activities: %w", err)
	}
	target := filepath.Join(s.baseDir, activitiesDirName, activitiesJSONFileName)
	if err := atomicWriteFile(target, payload, defaultFilePerm); err != nil {
		return fmt.Errorf("write activities: %w", err)
	}
	return nil
}
// LoadActivities returns a copy of the persisted activity timeline, or nil
// when nothing has been saved yet.
func (s *ConfigStore) LoadActivities() ([]JobActivity, error) {
	s.mu.RLock()
	if !s.configured {
		// In-memory mode: hand out a clone so callers cannot mutate state.
		out := cloneActivities(s.memActivities)
		s.mu.RUnlock()
		return out, nil
	}
	// File-backed mode: drop the lock before disk I/O; baseDir is immutable.
	s.mu.RUnlock()
	path := filepath.Join(s.baseDir, activitiesDirName, activitiesJSONFileName)
	data, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing file means "nothing saved yet", not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("read activities: %w", err)
	}
	var activities []JobActivity
	if err := json.Unmarshal(data, &activities); err != nil {
		return nil, fmt.Errorf("parse activities: %w", err)
	}
	return cloneActivities(activities), nil
}
// ListJobTypes returns the sorted set of known job types. In memory mode the
// set is the union of descriptor, config, and run-history keys; in disk mode
// it is the set of subdirectories under jobTypesDirName whose names pass
// sanitizeJobType. The read lock is held across the directory scan.
func (s *ConfigStore) ListJobTypes() ([]string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	jobTypeSet := make(map[string]struct{})
	if !s.configured {
		for jobType := range s.memDescriptors {
			jobTypeSet[jobType] = struct{}{}
		}
		for jobType := range s.memConfigs {
			jobTypeSet[jobType] = struct{}{}
		}
		for jobType := range s.memRunHistory {
			jobTypeSet[jobType] = struct{}{}
		}
	} else {
		jobTypesPath := filepath.Join(s.baseDir, jobTypesDirName)
		entries, err := os.ReadDir(jobTypesPath)
		if err != nil {
			// No job-types directory yet means "no job types", not an error.
			if os.IsNotExist(err) {
				return []string{}, nil
			}
			return nil, fmt.Errorf("list job types: %w", err)
		}
		for _, entry := range entries {
			if !entry.IsDir() {
				continue
			}
			// Skip directory names that would not round-trip through the
			// path-safety validation (e.g. stray manually-created dirs).
			jobType := strings.TrimSpace(entry.Name())
			if _, err := sanitizeJobType(jobType); err != nil {
				continue
			}
			jobTypeSet[jobType] = struct{}{}
		}
	}
	jobTypes := make([]string, 0, len(jobTypeSet))
	for jobType := range jobTypeSet {
		jobTypes = append(jobTypes, jobType)
	}
	sort.Strings(jobTypes)
	return jobTypes, nil
}
// loadRunHistoryLocked loads the run history for jobType. Callers must hold
// s.mu: in memory mode a missing entry is lazily inserted into memRunHistory,
// which is a map write — NOTE(review): confirm every caller holds the write
// lock here, not just RLock. Disk mode never mutates state and treats a
// missing runs file as an empty history.
func (s *ConfigStore) loadRunHistoryLocked(jobType string) (*JobTypeRunHistory, error) {
	if !s.configured {
		history, ok := s.memRunHistory[jobType]
		if !ok {
			history = &JobTypeRunHistory{JobType: jobType}
			s.memRunHistory[jobType] = history
		}
		// Deep clone so the caller cannot mutate the stored history.
		return cloneRunHistory(history), nil
	}
	runsPath := filepath.Join(s.baseDir, jobTypesDirName, jobType, runsJSONFileName)
	data, err := os.ReadFile(runsPath)
	if err != nil {
		if os.IsNotExist(err) {
			return &JobTypeRunHistory{JobType: jobType}, nil
		}
		return nil, fmt.Errorf("read run history for %s: %w", jobType, err)
	}
	var history JobTypeRunHistory
	if err := json.Unmarshal(data, &history); err != nil {
		return nil, fmt.Errorf("parse run history for %s: %w", jobType, err)
	}
	// Backfill JobType for files written before the field existed.
	if history.JobType == "" {
		history.JobType = jobType
	}
	return &history, nil
}
// saveRunHistoryLocked persists the run history for jobType. Callers must
// hold s.mu (write). Memory mode stores a deep clone; disk mode ensures the
// per-job-type directory exists and atomically writes indented JSON.
func (s *ConfigStore) saveRunHistoryLocked(jobType string, history *JobTypeRunHistory) error {
	if !s.configured {
		// Clone so the caller's history value can be mutated afterwards
		// without affecting stored state.
		s.memRunHistory[jobType] = cloneRunHistory(history)
		return nil
	}
	jobTypeDir, err := s.ensureJobTypeDir(jobType)
	if err != nil {
		return err
	}
	encoded, err := json.MarshalIndent(history, "", " ")
	if err != nil {
		return fmt.Errorf("encode run history for %s: %w", jobType, err)
	}
	runsPath := filepath.Join(jobTypeDir, runsJSONFileName)
	if err := atomicWriteFile(runsPath, encoded, defaultFilePerm); err != nil {
		return fmt.Errorf("write run history for %s: %w", jobType, err)
	}
	return nil
}
// ensureJobTypeDir creates (if needed) and returns the on-disk directory for
// jobType. When the store is not backed by disk it returns an empty path and
// no error, so callers can share the disk/memory code path.
func (s *ConfigStore) ensureJobTypeDir(jobType string) (string, error) {
	if !s.configured {
		return "", nil
	}
	dir := filepath.Join(s.baseDir, jobTypesDirName, jobType)
	if mkErr := os.MkdirAll(dir, defaultDirPerm); mkErr != nil {
		return "", fmt.Errorf("create job type dir for %s: %w", jobType, mkErr)
	}
	return dir, nil
}
// sanitizeJobType trims and validates a job type so it is safe to embed in a
// filesystem path. Only letters, digits, underscore, dash and dot are
// accepted; '/', '\\' and whitespace are rejected, which prevents traversal.
func sanitizeJobType(jobType string) (string, error) {
	trimmed := strings.TrimSpace(jobType)
	switch {
	case trimmed == "":
		return "", fmt.Errorf("job type is empty")
	case !validJobTypePattern.MatchString(trimmed):
		return "", fmt.Errorf("invalid job type %q: must match %s", trimmed, validJobTypePattern.String())
	default:
		return trimmed, nil
	}
}
// validJobIDPattern allows letters, digits, dash, underscore, and dot.
// url.PathEscape in jobDetailFileName provides a second layer of defense.
var validJobIDPattern = regexp.MustCompile(`^[A-Za-z0-9_.-]+$`)

// sanitizeJobID trims and validates a job id before it is used to derive a
// detail file name, rejecting anything outside the strict path-safe charset.
func sanitizeJobID(jobID string) (string, error) {
	trimmed := strings.TrimSpace(jobID)
	if trimmed == "" {
		return "", fmt.Errorf("job id is empty")
	}
	if !validJobIDPattern.MatchString(trimmed) {
		return "", fmt.Errorf("invalid job id %q: must match %s", trimmed, validJobIDPattern.String())
	}
	return trimmed, nil
}
// jobDetailFileName maps a job id to its JSON detail file name. PathEscape
// is a second layer of defense after sanitizeJobID: any path-meaningful
// character is percent-encoded before touching the filesystem.
func jobDetailFileName(jobID string) string {
	const ext = ".json"
	return url.PathEscape(jobID) + ext
}
// trimRuns sorts runs newest-first by CompletedAt (a nil CompletedAt sorts as
// the zero time, i.e. oldest) and keeps at most maxKeep entries.
//
// Fix: a negative maxKeep previously panicked on the runs[:maxKeep] slice
// expression; it is now clamped to 0, so maxKeep <= 0 simply empties the list.
func trimRuns(runs []JobRunRecord, maxKeep int) []JobRunRecord {
	if len(runs) == 0 {
		return runs
	}
	sort.Slice(runs, func(i, j int) bool {
		ti := time.Time{}
		if runs[i].CompletedAt != nil {
			ti = *runs[i].CompletedAt
		}
		tj := time.Time{}
		if runs[j].CompletedAt != nil {
			tj = *runs[j].CompletedAt
		}
		return ti.After(tj)
	})
	if maxKeep < 0 {
		maxKeep = 0
	}
	if len(runs) > maxKeep {
		runs = runs[:maxKeep]
	}
	return runs
}
// cloneRunHistory returns a copy of in whose run slices have their own
// backing arrays, so mutating the clone cannot affect the original. A nil
// input yields nil; nil slices stay nil in the clone.
func cloneRunHistory(in *JobTypeRunHistory) *JobTypeRunHistory {
	if in == nil {
		return nil
	}
	clone := *in
	if in.SuccessfulRuns != nil {
		clone.SuccessfulRuns = make([]JobRunRecord, len(in.SuccessfulRuns))
		copy(clone.SuccessfulRuns, in.SuccessfulRuns)
	}
	if in.ErrorRuns != nil {
		clone.ErrorRuns = make([]JobRunRecord, len(in.ErrorRuns))
		copy(clone.ErrorRuns, in.ErrorRuns)
	}
	return &clone
}
// cloneTrackedJobs deep-copies each element of in via cloneTrackedJob.
// An empty or nil input yields nil.
func cloneTrackedJobs(in []TrackedJob) []TrackedJob {
	if len(in) == 0 {
		return nil
	}
	cloned := make([]TrackedJob, 0, len(in))
	for _, job := range in {
		cloned = append(cloned, cloneTrackedJob(job))
	}
	return cloned
}
// cloneTrackedJob returns a copy of in whose Parameters, Labels, and
// ResultOutputValues maps are duplicated (values deep-copied where generic),
// so the clone shares no mutable state with the original. Nil maps stay nil.
func cloneTrackedJob(in TrackedJob) TrackedJob {
	clone := in
	if in.Parameters != nil {
		params := make(map[string]interface{}, len(in.Parameters))
		for name, value := range in.Parameters {
			params[name] = deepCopyGenericValue(value)
		}
		clone.Parameters = params
	}
	if in.Labels != nil {
		labels := make(map[string]string, len(in.Labels))
		for name, value := range in.Labels {
			labels[name] = value
		}
		clone.Labels = labels
	}
	if in.ResultOutputValues != nil {
		outputs := make(map[string]interface{}, len(in.ResultOutputValues))
		for name, value := range in.ResultOutputValues {
			outputs[name] = deepCopyGenericValue(value)
		}
		clone.ResultOutputValues = outputs
	}
	return clone
}
// deepCopyGenericValue recursively copies JSON-style generic values: nested
// map[string]interface{} and []interface{} containers are duplicated, while
// every other value (string, number, bool, nil, ...) is returned unchanged.
func deepCopyGenericValue(val interface{}) interface{} {
	if asMap, ok := val.(map[string]interface{}); ok {
		copied := make(map[string]interface{}, len(asMap))
		for key, inner := range asMap {
			copied[key] = deepCopyGenericValue(inner)
		}
		return copied
	}
	if asList, ok := val.([]interface{}); ok {
		copied := make([]interface{}, len(asList))
		for idx, inner := range asList {
			copied[idx] = deepCopyGenericValue(inner)
		}
		return copied
	}
	return val
}
// cloneActivities copies the activity slice, duplicating each non-nil
// Details map (values deep-copied) so callers and the store never share
// mutable state. An empty or nil input yields nil.
func cloneActivities(in []JobActivity) []JobActivity {
	if len(in) == 0 {
		return nil
	}
	cloned := make([]JobActivity, len(in))
	copy(cloned, in)
	for i := range cloned {
		if in[i].Details == nil {
			continue
		}
		details := make(map[string]interface{}, len(in[i].Details))
		for key, value := range in[i].Details {
			details[key] = deepCopyGenericValue(value)
		}
		cloned[i].Details = details
	}
	return cloned
}
// writeProtoFiles persists message twice: a binary protobuf file at pbPath
// (the authoritative copy — all reads use proto.Unmarshal on the .pb file)
// and a multiline JSON rendering at jsonPath for human inspection only. Both
// writes go through atomicWriteFile; the .pb file is written first, so a
// partial failure where only the .json write fails still leaves the store in
// a consistent state.
func writeProtoFiles(message proto.Message, pbPath string, jsonPath string) error {
	binary, err := proto.Marshal(message)
	if err != nil {
		return fmt.Errorf("marshal protobuf: %w", err)
	}
	if writeErr := atomicWriteFile(pbPath, binary, defaultFilePerm); writeErr != nil {
		return fmt.Errorf("write protobuf file: %w", writeErr)
	}
	marshaler := protojson.MarshalOptions{
		Multiline:       true,
		Indent:          " ",
		EmitUnpopulated: true,
	}
	humanReadable, err := marshaler.Marshal(message)
	if err != nil {
		return fmt.Errorf("marshal json: %w", err)
	}
	if writeErr := atomicWriteFile(jsonPath, humanReadable, defaultFilePerm); writeErr != nil {
		return fmt.Errorf("write json file: %w", writeErr)
	}
	return nil
}
// atomicWriteFile writes data to filename atomically: the payload is first
// written to a unique temporary file in the destination directory (same
// directory so the final rename cannot cross filesystems) and then renamed
// over the target. Parent directories are created as needed.
//
// Fix: the previous fixed "<name>.tmp" temp name let two concurrent writers
// of the same file interleave into one temp file and rename a corrupted
// payload into place; os.CreateTemp gives each writer its own file.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(filename)
	if err := os.MkdirAll(dir, defaultDirPerm); err != nil {
		return fmt.Errorf("create directory %s: %w", dir, err)
	}
	tmp, err := os.CreateTemp(dir, filepath.Base(filename)+".tmp*")
	if err != nil {
		return err
	}
	tmpName := tmp.Name()
	_, writeErr := tmp.Write(data)
	if closeErr := tmp.Close(); writeErr == nil {
		writeErr = closeErr
	}
	if writeErr == nil {
		// CreateTemp creates the file with 0600; match the requested mode.
		writeErr = os.Chmod(tmpName, perm)
	}
	if writeErr != nil {
		_ = os.Remove(tmpName)
		return writeErr
	}
	if err := os.Rename(tmpName, filename); err != nil {
		_ = os.Remove(tmpName)
		return err
	}
	return nil
}

View File

@@ -0,0 +1,257 @@
package plugin
import (
"reflect"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestConfigStoreDescriptorRoundTrip verifies that a job-type descriptor
// saved to a disk-backed store can be loaded back with its fields intact.
func TestConfigStoreDescriptorRoundTrip(t *testing.T) {
	t.Parallel()
	tempDir := t.TempDir()
	store, err := NewConfigStore(tempDir)
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	descriptor := &plugin_pb.JobTypeDescriptor{
		JobType:           "vacuum",
		DisplayName:       "Vacuum",
		Description:       "Vacuum volumes",
		DescriptorVersion: 1,
	}
	if err := store.SaveDescriptor("vacuum", descriptor); err != nil {
		t.Fatalf("SaveDescriptor: %v", err)
	}
	got, err := store.LoadDescriptor("vacuum")
	if err != nil {
		t.Fatalf("LoadDescriptor: %v", err)
	}
	if got == nil {
		t.Fatalf("LoadDescriptor: nil descriptor")
	}
	if got.DisplayName != descriptor.DisplayName {
		t.Fatalf("unexpected display name: got %q want %q", got.DisplayName, descriptor.DisplayName)
	}
}
// TestConfigStoreRunHistoryRetention appends more success and error runs than
// the retention caps allow, then verifies both buckets are trimmed to
// MaxSuccessfulRunHistory / MaxErrorRunHistory and kept in newest-first
// (descending CompletedAt) order.
func TestConfigStoreRunHistoryRetention(t *testing.T) {
	t.Parallel()
	store, err := NewConfigStore(t.TempDir())
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	base := time.Now().UTC().Add(-24 * time.Hour)
	for i := 0; i < 15; i++ {
		// NOTE(review): time.Duration(i).String() yields ids like "s5ns";
		// works (only uniqueness matters) but strconv.Itoa would be clearer.
		err := store.AppendRunRecord("balance", &JobRunRecord{
			RunID:       "s" + time.Duration(i).String(),
			JobID:       "job-success",
			JobType:     "balance",
			WorkerID:    "worker-a",
			Outcome:     RunOutcomeSuccess,
			CompletedAt: timeToPtr(base.Add(time.Duration(i) * time.Minute)),
		})
		if err != nil {
			t.Fatalf("AppendRunRecord success[%d]: %v", i, err)
		}
	}
	for i := 0; i < 12; i++ {
		err := store.AppendRunRecord("balance", &JobRunRecord{
			RunID:       "e" + time.Duration(i).String(),
			JobID:       "job-error",
			JobType:     "balance",
			WorkerID:    "worker-b",
			Outcome:     RunOutcomeError,
			CompletedAt: timeToPtr(base.Add(time.Duration(i) * time.Minute)),
		})
		if err != nil {
			t.Fatalf("AppendRunRecord error[%d]: %v", i, err)
		}
	}
	history, err := store.LoadRunHistory("balance")
	if err != nil {
		t.Fatalf("LoadRunHistory: %v", err)
	}
	if len(history.SuccessfulRuns) != MaxSuccessfulRunHistory {
		t.Fatalf("successful retention mismatch: got %d want %d", len(history.SuccessfulRuns), MaxSuccessfulRunHistory)
	}
	if len(history.ErrorRuns) != MaxErrorRunHistory {
		t.Fatalf("error retention mismatch: got %d want %d", len(history.ErrorRuns), MaxErrorRunHistory)
	}
	// Verify descending CompletedAt order; a nil timestamp counts as zero.
	for i := 1; i < len(history.SuccessfulRuns); i++ {
		t1 := time.Time{}
		if history.SuccessfulRuns[i-1].CompletedAt != nil {
			t1 = *history.SuccessfulRuns[i-1].CompletedAt
		}
		t2 := time.Time{}
		if history.SuccessfulRuns[i].CompletedAt != nil {
			t2 = *history.SuccessfulRuns[i].CompletedAt
		}
		if t1.Before(t2) {
			t.Fatalf("successful run order not descending at %d", i)
		}
	}
	for i := 1; i < len(history.ErrorRuns); i++ {
		t1 := time.Time{}
		if history.ErrorRuns[i-1].CompletedAt != nil {
			t1 = *history.ErrorRuns[i-1].CompletedAt
		}
		t2 := time.Time{}
		if history.ErrorRuns[i].CompletedAt != nil {
			t2 = *history.ErrorRuns[i].CompletedAt
		}
		if t1.Before(t2) {
			t.Fatalf("error run order not descending at %d", i)
		}
	}
}
// TestConfigStoreListJobTypes uses an in-memory store (empty base dir) and
// checks that ListJobTypes unions job types from descriptors, configs, and
// run history, returning them sorted.
func TestConfigStoreListJobTypes(t *testing.T) {
	t.Parallel()
	store, err := NewConfigStore("")
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	if err := store.SaveDescriptor("vacuum", &plugin_pb.JobTypeDescriptor{JobType: "vacuum"}); err != nil {
		t.Fatalf("SaveDescriptor: %v", err)
	}
	if err := store.SaveJobTypeConfig(&plugin_pb.PersistedJobTypeConfig{
		JobType:      "balance",
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{Enabled: true},
	}); err != nil {
		t.Fatalf("SaveJobTypeConfig: %v", err)
	}
	if err := store.AppendRunRecord("ec", &JobRunRecord{Outcome: RunOutcomeSuccess, CompletedAt: timeToPtr(time.Now().UTC())}); err != nil {
		t.Fatalf("AppendRunRecord: %v", err)
	}
	got, err := store.ListJobTypes()
	if err != nil {
		t.Fatalf("ListJobTypes: %v", err)
	}
	// One type from each source, sorted alphabetically.
	want := []string{"balance", "ec", "vacuum"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("unexpected job types: got=%v want=%v", got, want)
	}
}
// TestConfigStoreMonitorStateRoundTrip verifies that tracked jobs and job
// activities saved to a disk-backed store reload intact, including nested
// activity Details values.
func TestConfigStoreMonitorStateRoundTrip(t *testing.T) {
	t.Parallel()
	store, err := NewConfigStore(t.TempDir())
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	tracked := []TrackedJob{
		{
			JobID:     "job-1",
			JobType:   "vacuum",
			State:     "running",
			Progress:  55,
			WorkerID:  "worker-a",
			CreatedAt: timeToPtr(time.Now().UTC().Add(-2 * time.Minute)),
			UpdatedAt: timeToPtr(time.Now().UTC().Add(-1 * time.Minute)),
		},
	}
	activities := []JobActivity{
		{
			JobID:      "job-1",
			JobType:    "vacuum",
			Source:     "worker_progress",
			Message:    "processing",
			Stage:      "running",
			OccurredAt: timeToPtr(time.Now().UTC()),
			Details: map[string]interface{}{
				"step": "scan",
			},
		},
	}
	if err := store.SaveTrackedJobs(tracked); err != nil {
		t.Fatalf("SaveTrackedJobs: %v", err)
	}
	if err := store.SaveActivities(activities); err != nil {
		t.Fatalf("SaveActivities: %v", err)
	}
	gotTracked, err := store.LoadTrackedJobs()
	if err != nil {
		t.Fatalf("LoadTrackedJobs: %v", err)
	}
	if len(gotTracked) != 1 || gotTracked[0].JobID != tracked[0].JobID {
		t.Fatalf("unexpected tracked jobs: %+v", gotTracked)
	}
	gotActivities, err := store.LoadActivities()
	if err != nil {
		t.Fatalf("LoadActivities: %v", err)
	}
	if len(gotActivities) != 1 || gotActivities[0].Message != activities[0].Message {
		t.Fatalf("unexpected activities: %+v", gotActivities)
	}
	// Nested Details map must survive the JSON round trip.
	if gotActivities[0].Details["step"] != "scan" {
		t.Fatalf("unexpected activity details: %+v", gotActivities[0].Details)
	}
}
// TestConfigStoreJobDetailRoundTrip verifies a per-job detail snapshot
// (including parameters, labels, and result output values) survives a
// save/load cycle on a disk-backed store.
func TestConfigStoreJobDetailRoundTrip(t *testing.T) {
	t.Parallel()
	store, err := NewConfigStore(t.TempDir())
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	input := TrackedJob{
		JobID:     "job-detail-1",
		JobType:   "vacuum",
		Summary:   "detail summary",
		Detail:    "detail payload",
		CreatedAt: timeToPtr(time.Now().UTC().Add(-2 * time.Minute)),
		UpdatedAt: timeToPtr(time.Now().UTC()),
		Parameters: map[string]interface{}{
			"volume_id": map[string]interface{}{"int64_value": "3"},
		},
		Labels: map[string]string{
			"source": "detector",
		},
		ResultOutputValues: map[string]interface{}{
			"moved": map[string]interface{}{"bool_value": true},
		},
	}
	if err := store.SaveJobDetail(input); err != nil {
		t.Fatalf("SaveJobDetail: %v", err)
	}
	got, err := store.LoadJobDetail(input.JobID)
	if err != nil {
		t.Fatalf("LoadJobDetail: %v", err)
	}
	if got == nil {
		t.Fatalf("LoadJobDetail returned nil")
	}
	if got.Detail != input.Detail {
		t.Fatalf("unexpected detail: got=%q want=%q", got.Detail, input.Detail)
	}
	if got.Labels["source"] != "detector" {
		t.Fatalf("unexpected labels: %+v", got.Labels)
	}
	if got.ResultOutputValues == nil {
		t.Fatalf("expected result output values")
	}
}

View File

@@ -0,0 +1,231 @@
package plugin
import (
"encoding/base64"
"sort"
"strconv"
"strings"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"google.golang.org/protobuf/proto"
)
// enrichTrackedJobParameters augments a job's plain parameter map with a
// derived "execution_plan" entry when the map carries an encoded
// worker_pb.TaskParams payload. The input map is never mutated: when a plan
// can be derived, a fresh map is returned; otherwise (empty input, plan
// already present, payload missing/undecodable, unsupported job type) the
// original map is returned unchanged.
func enrichTrackedJobParameters(jobType string, parameters map[string]interface{}) map[string]interface{} {
	if len(parameters) == 0 {
		return parameters
	}
	if _, alreadySet := parameters["execution_plan"]; alreadySet {
		return parameters
	}
	taskParams, decoded := decodeTaskParamsFromPlainParameters(parameters)
	if !decoded || taskParams == nil {
		return parameters
	}
	plan := buildExecutionPlan(strings.TrimSpace(jobType), taskParams)
	if plan == nil {
		return parameters
	}
	withPlan := make(map[string]interface{}, len(parameters)+1)
	for key, value := range parameters {
		withPlan[key] = value
	}
	withPlan["execution_plan"] = plan
	return withPlan
}
// decodeTaskParamsFromPlainParameters extracts and decodes the base64-encoded
// worker_pb.TaskParams stored under parameters["task_params_pb"]["bytes_value"].
// The boolean result reports whether a valid payload was found; every
// malformed shape simply yields (nil, false) — decoding is best-effort.
func decodeTaskParamsFromPlainParameters(parameters map[string]interface{}) (*worker_pb.TaskParams, bool) {
	rawField, present := parameters["task_params_pb"]
	if !present || rawField == nil {
		return nil, false
	}
	fieldMap, isMap := rawField.(map[string]interface{})
	if !isMap {
		return nil, false
	}
	encoded, _ := fieldMap["bytes_value"].(string)
	encoded = strings.TrimSpace(encoded)
	if encoded == "" {
		return nil, false
	}
	raw, decodeErr := base64.StdEncoding.DecodeString(encoded)
	if decodeErr != nil {
		return nil, false
	}
	taskParams := &worker_pb.TaskParams{}
	if unmarshalErr := proto.Unmarshal(raw, taskParams); unmarshalErr != nil {
		return nil, false
	}
	return taskParams, true
}
// buildExecutionPlan renders a job-type-specific execution plan from decoded
// task parameters. An empty job type is inferred as erasure coding when EC
// parameters are present; job types without a plan builder yield nil.
func buildExecutionPlan(jobType string, params *worker_pb.TaskParams) map[string]interface{} {
	if params == nil {
		return nil
	}
	kind := strings.TrimSpace(jobType)
	if kind == "" && params.GetErasureCodingParams() != nil {
		kind = "erasure_coding"
	}
	if kind == "erasure_coding" {
		return buildErasureCodingExecutionPlan(params)
	}
	return nil
}
// buildErasureCodingExecutionPlan renders an EC task's parameters as a
// generic map for UI display: the shard layout (data/parity counts), the
// source and target endpoints, and — when targets carry shard ids — a
// flattened per-shard assignment list sorted by shard id. Returns nil when
// params has no erasure-coding section.
func buildErasureCodingExecutionPlan(params *worker_pb.TaskParams) map[string]interface{} {
	if params == nil {
		return nil
	}
	ecParams := params.GetErasureCodingParams()
	if ecParams == nil {
		return nil
	}
	// Fall back to the cluster-wide defaults when the task does not pin
	// explicit shard counts.
	dataShards := int(ecParams.DataShards)
	if dataShards <= 0 {
		dataShards = int(erasure_coding.DataShardsCount)
	}
	parityShards := int(ecParams.ParityShards)
	if parityShards <= 0 {
		parityShards = int(erasure_coding.ParityShardsCount)
	}
	totalShards := dataShards + parityShards
	sources := make([]map[string]interface{}, 0, len(params.Sources))
	for _, source := range params.Sources {
		if source == nil {
			continue
		}
		sources = append(sources, buildExecutionEndpoint(
			source.Node,
			source.DataCenter,
			source.Rack,
			source.VolumeId,
			source.ShardIds,
			dataShards,
		))
	}
	targets := make([]map[string]interface{}, 0, len(params.Targets))
	shardAssignments := make([]map[string]interface{}, 0, totalShards)
	for targetIndex, target := range params.Targets {
		if target == nil {
			continue
		}
		targets = append(targets, buildExecutionEndpoint(
			target.Node,
			target.DataCenter,
			target.Rack,
			target.VolumeId,
			target.ShardIds,
			dataShards,
		))
		// Flatten each target's shard ids into one assignment row per shard.
		// target_index is 1-based for display purposes.
		for _, shardID := range normalizeShardIDs(target.ShardIds) {
			kind, label := classifyShardID(shardID, dataShards)
			shardAssignments = append(shardAssignments, map[string]interface{}{
				"shard_id":           shardID,
				"kind":               kind,
				"label":              label,
				"target_index":       targetIndex + 1,
				"target_node":        strings.TrimSpace(target.Node),
				"target_data_center": strings.TrimSpace(target.DataCenter),
				"target_rack":        strings.TrimSpace(target.Rack),
				"target_volume_id":   int(target.VolumeId),
			})
		}
	}
	// Sort the flattened list by shard id; the type assertion cannot fail
	// because shard_id is always stored as int above.
	sort.Slice(shardAssignments, func(i, j int) bool {
		left, _ := shardAssignments[i]["shard_id"].(int)
		right, _ := shardAssignments[j]["shard_id"].(int)
		return left < right
	})
	plan := map[string]interface{}{
		"job_type":      "erasure_coding",
		"task_id":       strings.TrimSpace(params.TaskId),
		"volume_id":     int(params.VolumeId),
		"collection":    strings.TrimSpace(params.Collection),
		"data_shards":   dataShards,
		"parity_shards": parityShards,
		"total_shards":  totalShards,
		"sources":       sources,
		"targets":       targets,
		"source_count":  len(sources),
		"target_count":  len(targets),
	}
	// Only attach assignments when at least one target declared shard ids.
	if len(shardAssignments) > 0 {
		plan["shard_assignments"] = shardAssignments
	}
	return plan
}
// buildExecutionEndpoint describes one source or target location as a generic
// map, partitioning its shard ids into data (< dataShardCount) and parity
// (>= dataShardCount) groups. All location strings are trimmed.
func buildExecutionEndpoint(
	node string,
	dataCenter string,
	rack string,
	volumeID uint32,
	shardIDs []uint32,
	dataShardCount int,
) map[string]interface{} {
	allShards := normalizeShardIDs(shardIDs)
	dataGroup := make([]int, 0, len(allShards))
	parityGroup := make([]int, 0, len(allShards))
	for _, shardID := range allShards {
		if shardID >= dataShardCount {
			parityGroup = append(parityGroup, shardID)
			continue
		}
		dataGroup = append(dataGroup, shardID)
	}
	return map[string]interface{}{
		"node":             strings.TrimSpace(node),
		"data_center":      strings.TrimSpace(dataCenter),
		"rack":             strings.TrimSpace(rack),
		"volume_id":        int(volumeID),
		"shard_ids":        allShards,
		"data_shard_ids":   dataGroup,
		"parity_shard_ids": parityGroup,
	}
}
func normalizeShardIDs(shardIDs []uint32) []int {
if len(shardIDs) == 0 {
return nil
}
out := make([]int, 0, len(shardIDs))
for _, shardID := range shardIDs {
out = append(out, int(shardID))
}
sort.Ints(out)
return out
}
// classifyShardID labels a shard as data ("D<n>") or parity ("P<n>") relative
// to dataShardCount, falling back to the cluster-wide data shard count when
// the caller passes a non-positive value.
func classifyShardID(shardID int, dataShardCount int) (kind string, label string) {
	if dataShardCount <= 0 {
		dataShardCount = int(erasure_coding.DataShardsCount)
	}
	if shardID >= dataShardCount {
		return "parity", "P" + strconv.Itoa(shardID)
	}
	return "data", "D" + strconv.Itoa(shardID)
}

1243
weed/admin/plugin/plugin.go Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,112 @@
package plugin
import (
"context"
"errors"
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestRunDetectionSendsCancelOnContextDone verifies that canceling the
// caller's context while a detection request is outstanding makes the plugin
// send a cancel_request (targeting the original request id, detection kind)
// to the worker and surface context.Canceled to the caller. The two channel
// reads are ordered: request first, then cancel.
func TestRunDetectionSendsCancelOnContextDone(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New plugin error: %v", err)
	}
	defer pluginSvc.Shutdown()
	const workerID = "worker-detect"
	const jobType = "vacuum"
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: workerID,
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, MaxDetectionConcurrency: 1},
		},
	})
	// Buffered channel stands in for the worker's gRPC stream.
	session := &streamSession{workerID: workerID, outgoing: make(chan *plugin_pb.AdminToWorkerMessage, 4)}
	pluginSvc.putSession(session)
	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)
	go func() {
		_, runErr := pluginSvc.RunDetection(ctx, jobType, &plugin_pb.ClusterContext{}, 10)
		errCh <- runErr
	}()
	first := <-session.outgoing
	if first.GetRunDetectionRequest() == nil {
		t.Fatalf("expected first message to be run_detection_request")
	}
	cancel()
	second := <-session.outgoing
	cancelReq := second.GetCancelRequest()
	if cancelReq == nil {
		t.Fatalf("expected second message to be cancel_request")
	}
	if cancelReq.TargetId != first.RequestId {
		t.Fatalf("unexpected cancel target id: got=%s want=%s", cancelReq.TargetId, first.RequestId)
	}
	if cancelReq.TargetKind != plugin_pb.WorkKind_WORK_KIND_DETECTION {
		t.Fatalf("unexpected cancel target kind: %v", cancelReq.TargetKind)
	}
	runErr := <-errCh
	if !errors.Is(runErr, context.Canceled) {
		t.Fatalf("expected context canceled error, got %v", runErr)
	}
}
// TestExecuteJobSendsCancelOnContextDone mirrors the detection cancel test
// for job execution: canceling the context after the execute_job_request is
// dispatched must produce a cancel_request (execution kind, same request id)
// and a context.Canceled error for the caller.
func TestExecuteJobSendsCancelOnContextDone(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New plugin error: %v", err)
	}
	defer pluginSvc.Shutdown()
	const workerID = "worker-exec"
	const jobType = "vacuum"
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: workerID,
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanExecute: true, MaxExecutionConcurrency: 1},
		},
	})
	session := &streamSession{workerID: workerID, outgoing: make(chan *plugin_pb.AdminToWorkerMessage, 4)}
	pluginSvc.putSession(session)
	job := &plugin_pb.JobSpec{JobId: "job-1", JobType: jobType}
	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)
	go func() {
		_, runErr := pluginSvc.ExecuteJob(ctx, job, &plugin_pb.ClusterContext{}, 1)
		errCh <- runErr
	}()
	first := <-session.outgoing
	if first.GetExecuteJobRequest() == nil {
		t.Fatalf("expected first message to be execute_job_request")
	}
	cancel()
	second := <-session.outgoing
	cancelReq := second.GetCancelRequest()
	if cancelReq == nil {
		t.Fatalf("expected second message to be cancel_request")
	}
	if cancelReq.TargetId != first.RequestId {
		t.Fatalf("unexpected cancel target id: got=%s want=%s", cancelReq.TargetId, first.RequestId)
	}
	if cancelReq.TargetKind != plugin_pb.WorkKind_WORK_KIND_EXECUTION {
		t.Fatalf("unexpected cancel target kind: %v", cancelReq.TargetKind)
	}
	runErr := <-errCh
	if !errors.Is(runErr, context.Canceled) {
		t.Fatalf("expected context canceled error, got %v", runErr)
	}
}

View File

@@ -0,0 +1,125 @@
package plugin
import (
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestEnsureJobTypeConfigFromDescriptorBootstrapsDefaults verifies that when
// no persisted config exists for a job type, the descriptor's runtime
// defaults and admin/worker form default values are copied into a fresh
// persisted config, stamped with the descriptor version.
func TestEnsureJobTypeConfigFromDescriptorBootstrapsDefaults(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	descriptor := &plugin_pb.JobTypeDescriptor{
		JobType:           "vacuum",
		DescriptorVersion: 3,
		AdminConfigForm: &plugin_pb.ConfigForm{
			DefaultValues: map[string]*plugin_pb.ConfigValue{
				"scan_scope": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "all"}},
			},
		},
		WorkerConfigForm: &plugin_pb.ConfigForm{
			DefaultValues: map[string]*plugin_pb.ConfigValue{
				"threshold": {Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.3}},
			},
		},
		AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
			Enabled:                       true,
			DetectionIntervalSeconds:      60,
			DetectionTimeoutSeconds:       20,
			MaxJobsPerDetection:           30,
			GlobalExecutionConcurrency:    4,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    3,
			RetryBackoffSeconds:           5,
		},
	}
	if err := pluginSvc.ensureJobTypeConfigFromDescriptor("vacuum", descriptor); err != nil {
		t.Fatalf("ensureJobTypeConfigFromDescriptor: %v", err)
	}
	cfg, err := pluginSvc.LoadJobTypeConfig("vacuum")
	if err != nil {
		t.Fatalf("LoadJobTypeConfig: %v", err)
	}
	if cfg == nil {
		t.Fatalf("expected non-nil config")
	}
	if cfg.DescriptorVersion != 3 {
		t.Fatalf("unexpected descriptor version: got=%d", cfg.DescriptorVersion)
	}
	if cfg.AdminRuntime == nil || !cfg.AdminRuntime.Enabled {
		t.Fatalf("expected enabled admin settings")
	}
	if cfg.AdminRuntime.GlobalExecutionConcurrency != 4 {
		t.Fatalf("unexpected global execution concurrency: %d", cfg.AdminRuntime.GlobalExecutionConcurrency)
	}
	if _, ok := cfg.AdminConfigValues["scan_scope"]; !ok {
		t.Fatalf("missing admin default value")
	}
	if _, ok := cfg.WorkerConfigValues["threshold"]; !ok {
		t.Fatalf("missing worker default value")
	}
}
// TestEnsureJobTypeConfigFromDescriptorDoesNotOverwriteExisting verifies that
// bootstrapping from a descriptor is a no-op for job types that already have
// a persisted config: existing admin runtime settings and config values win
// over the descriptor's defaults.
func TestEnsureJobTypeConfigFromDescriptorDoesNotOverwriteExisting(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	// Persist a config first; the descriptor below conflicts with it.
	if err := pluginSvc.SaveJobTypeConfig(&plugin_pb.PersistedJobTypeConfig{
		JobType: "balance",
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{
			Enabled:                    true,
			GlobalExecutionConcurrency: 9,
		},
		AdminConfigValues: map[string]*plugin_pb.ConfigValue{
			"custom": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "keep"}},
		},
	}); err != nil {
		t.Fatalf("SaveJobTypeConfig: %v", err)
	}
	descriptor := &plugin_pb.JobTypeDescriptor{
		JobType:           "balance",
		DescriptorVersion: 7,
		AdminConfigForm: &plugin_pb.ConfigForm{
			DefaultValues: map[string]*plugin_pb.ConfigValue{
				"custom": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "overwrite"}},
			},
		},
		AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
			Enabled:                    true,
			GlobalExecutionConcurrency: 1,
		},
	}
	if err := pluginSvc.ensureJobTypeConfigFromDescriptor("balance", descriptor); err != nil {
		t.Fatalf("ensureJobTypeConfigFromDescriptor: %v", err)
	}
	cfg, err := pluginSvc.LoadJobTypeConfig("balance")
	if err != nil {
		t.Fatalf("LoadJobTypeConfig: %v", err)
	}
	if cfg == nil {
		t.Fatalf("expected config")
	}
	if cfg.AdminRuntime == nil || cfg.AdminRuntime.GlobalExecutionConcurrency != 9 {
		t.Fatalf("existing admin settings should be preserved, got=%v", cfg.AdminRuntime)
	}
	custom := cfg.AdminConfigValues["custom"]
	if custom == nil || custom.GetStringValue() != "keep" {
		t.Fatalf("existing admin config should be preserved")
	}
}

View File

@@ -0,0 +1,197 @@
package plugin
import (
"context"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestRunDetectionIncludesLatestSuccessfulRun verifies the detection request
// carries last_successful_run set to the newest *successful* completion —
// ignoring a more recent error run — sourced from the stored run history.
func TestRunDetectionIncludesLatestSuccessfulRun(t *testing.T) {
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New plugin error: %v", err)
	}
	defer pluginSvc.Shutdown()
	jobType := "vacuum"
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, MaxDetectionConcurrency: 1},
		},
	})
	session := &streamSession{workerID: "worker-a", outgoing: make(chan *plugin_pb.AdminToWorkerMessage, 1)}
	pluginSvc.putSession(session)
	// Seed: an older success, a newer error, and the latest success. Only the
	// latest success should be reported.
	oldSuccess := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
	latestSuccess := time.Date(2026, 2, 1, 0, 0, 0, 0, time.UTC)
	if err := pluginSvc.store.AppendRunRecord(jobType, &JobRunRecord{Outcome: RunOutcomeSuccess, CompletedAt: timeToPtr(oldSuccess)}); err != nil {
		t.Fatalf("AppendRunRecord old success: %v", err)
	}
	if err := pluginSvc.store.AppendRunRecord(jobType, &JobRunRecord{Outcome: RunOutcomeError, CompletedAt: timeToPtr(latestSuccess.Add(2 * time.Hour))}); err != nil {
		t.Fatalf("AppendRunRecord error run: %v", err)
	}
	if err := pluginSvc.store.AppendRunRecord(jobType, &JobRunRecord{Outcome: RunOutcomeSuccess, CompletedAt: timeToPtr(latestSuccess)}); err != nil {
		t.Fatalf("AppendRunRecord latest success: %v", err)
	}
	resultCh := make(chan error, 1)
	go func() {
		_, runErr := pluginSvc.RunDetection(context.Background(), jobType, &plugin_pb.ClusterContext{}, 10)
		resultCh <- runErr
	}()
	message := <-session.outgoing
	detectRequest := message.GetRunDetectionRequest()
	if detectRequest == nil {
		t.Fatalf("expected run detection request message")
	}
	if detectRequest.LastSuccessfulRun == nil {
		t.Fatalf("expected last_successful_run to be set")
	}
	if got := detectRequest.LastSuccessfulRun.AsTime().UTC(); !got.Equal(latestSuccess) {
		t.Fatalf("unexpected last_successful_run, got=%s want=%s", got, latestSuccess)
	}
	// Complete the detection so RunDetection returns cleanly.
	pluginSvc.handleDetectionComplete("worker-a", &plugin_pb.DetectionComplete{
		RequestId: message.RequestId,
		JobType:   jobType,
		Success:   true,
	})
	if runErr := <-resultCh; runErr != nil {
		t.Fatalf("RunDetection error: %v", runErr)
	}
}
// TestRunDetectionOmitsLastSuccessfulRunWhenNoSuccessHistory verifies the
// detection request leaves last_successful_run unset when the run history
// contains only error outcomes.
func TestRunDetectionOmitsLastSuccessfulRunWhenNoSuccessHistory(t *testing.T) {
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New plugin error: %v", err)
	}
	defer pluginSvc.Shutdown()
	jobType := "vacuum"
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, MaxDetectionConcurrency: 1},
		},
	})
	session := &streamSession{workerID: "worker-a", outgoing: make(chan *plugin_pb.AdminToWorkerMessage, 1)}
	pluginSvc.putSession(session)
	// Only an error run in history — no success to report.
	if err := pluginSvc.store.AppendRunRecord(jobType, &JobRunRecord{
		Outcome:     RunOutcomeError,
		CompletedAt: timeToPtr(time.Date(2026, 2, 10, 0, 0, 0, 0, time.UTC)),
	}); err != nil {
		t.Fatalf("AppendRunRecord error run: %v", err)
	}
	resultCh := make(chan error, 1)
	go func() {
		_, runErr := pluginSvc.RunDetection(context.Background(), jobType, &plugin_pb.ClusterContext{}, 10)
		resultCh <- runErr
	}()
	message := <-session.outgoing
	detectRequest := message.GetRunDetectionRequest()
	if detectRequest == nil {
		t.Fatalf("expected run detection request message")
	}
	if detectRequest.LastSuccessfulRun != nil {
		t.Fatalf("expected last_successful_run to be nil when no success history")
	}
	pluginSvc.handleDetectionComplete("worker-a", &plugin_pb.DetectionComplete{
		RequestId: message.RequestId,
		JobType:   jobType,
		Success:   true,
	})
	if runErr := <-resultCh; runErr != nil {
		t.Fatalf("RunDetection error: %v", runErr)
	}
}
// TestRunDetectionWithReportCapturesDetectionActivities drives a full
// detection round trip (request -> proposals -> complete) and verifies the
// returned report identifies the worker and proposals, and that the activity
// log records the requested/proposal/completed stages under the request id.
func TestRunDetectionWithReportCapturesDetectionActivities(t *testing.T) {
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New plugin error: %v", err)
	}
	defer pluginSvc.Shutdown()
	jobType := "vacuum"
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, MaxDetectionConcurrency: 1},
		},
	})
	session := &streamSession{workerID: "worker-a", outgoing: make(chan *plugin_pb.AdminToWorkerMessage, 1)}
	pluginSvc.putSession(session)
	reportCh := make(chan *DetectionReport, 1)
	errCh := make(chan error, 1)
	go func() {
		report, runErr := pluginSvc.RunDetectionWithReport(context.Background(), jobType, &plugin_pb.ClusterContext{}, 10)
		reportCh <- report
		errCh <- runErr
	}()
	message := <-session.outgoing
	requestID := message.GetRequestId()
	if requestID == "" {
		t.Fatalf("expected request id in detection request")
	}
	// Simulate the worker: one proposal, then a successful completion.
	pluginSvc.handleDetectionProposals("worker-a", &plugin_pb.DetectionProposals{
		RequestId: requestID,
		JobType:   jobType,
		Proposals: []*plugin_pb.JobProposal{
			{
				ProposalId: "proposal-1",
				JobType:    jobType,
				Summary:    "vacuum proposal",
				Detail:     "based on garbage ratio",
			},
		},
	})
	pluginSvc.handleDetectionComplete("worker-a", &plugin_pb.DetectionComplete{
		RequestId:      requestID,
		JobType:        jobType,
		Success:        true,
		TotalProposals: 1,
	})
	report := <-reportCh
	if report == nil {
		t.Fatalf("expected detection report")
	}
	if report.RequestID == "" {
		t.Fatalf("expected detection report request id")
	}
	if report.WorkerID != "worker-a" {
		t.Fatalf("expected worker-a, got %q", report.WorkerID)
	}
	if len(report.Proposals) != 1 {
		t.Fatalf("expected one proposal in report, got %d", len(report.Proposals))
	}
	if runErr := <-errCh; runErr != nil {
		t.Fatalf("RunDetectionWithReport error: %v", runErr)
	}
	// Collect the stages logged for this request; all three phases must appear.
	activities := pluginSvc.ListActivities(jobType, 0)
	stages := map[string]bool{}
	for _, activity := range activities {
		if activity.RequestID != report.RequestID {
			continue
		}
		stages[activity.Stage] = true
	}
	if !stages["requested"] || !stages["proposal"] || !stages["completed"] {
		t.Fatalf("expected requested/proposal/completed activities, got stages=%v", stages)
	}
}

View File

@@ -0,0 +1,896 @@
package plugin
import (
"encoding/json"
"sort"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/protobuf/encoding/protojson"
)
const (
	// maxTrackedJobsTotal caps the in-memory tracked-job map; once exceeded,
	// the oldest terminal jobs are evicted (see pruneTrackedJobsLocked).
	maxTrackedJobsTotal = 1000
	// maxActivityRecords caps the in-memory and persisted activity log.
	maxActivityRecords = 4000
	// maxRelatedJobs clamps the relatedLimit argument of BuildJobDetail.
	maxRelatedJobs = 100
)
// Lowercased terminal job-state strings, derived from the protobuf enum names
// so they always match the strings.ToLower(state.String()) values stored on
// TrackedJob.State.
var (
	StateSucceeded = strings.ToLower(plugin_pb.JobState_JOB_STATE_SUCCEEDED.String())
	StateFailed = strings.ToLower(plugin_pb.JobState_JOB_STATE_FAILED.String())
	StateCanceled = strings.ToLower(plugin_pb.JobState_JOB_STATE_CANCELED.String())
)
// activityLess reports whether activity a occurred after activity b
// (newest-first order). A nil OccurredAt is treated as the zero time.
func activityLess(a, b JobActivity) bool {
	occurredAt := func(activity JobActivity) time.Time {
		if activity.OccurredAt == nil {
			return time.Time{}
		}
		return *activity.OccurredAt
	}
	return occurredAt(a).After(occurredAt(b))
}
// loadPersistedMonitorState restores tracked jobs and activities from the
// config store on startup. It returns the first load error encountered;
// per-job detail migration failures are only logged.
func (r *Plugin) loadPersistedMonitorState() error {
	trackedJobs, err := r.store.LoadTrackedJobs()
	if err != nil {
		return err
	}
	activities, err := r.store.LoadActivities()
	if err != nil {
		return err
	}
	if len(trackedJobs) > 0 {
		r.jobsMu.Lock()
		for i := range trackedJobs {
			job := trackedJobs[i]
			// Skip records without a usable job id.
			if strings.TrimSpace(job.JobID) == "" {
				continue
			}
			// Backward compatibility: migrate older inline detail payloads
			// out of tracked_jobs.json into dedicated per-job detail files.
			if hasTrackedJobRichDetails(job) {
				if err := r.store.SaveJobDetail(job); err != nil {
					glog.Warningf("Plugin failed to migrate detail snapshot for job %s: %v", job.JobID, err)
				}
			}
			// Only lightweight status fields are kept in memory; heavy detail
			// payloads live in per-job detail files.
			stripTrackedJobDetailFields(&job)
			jobCopy := job
			r.jobs[job.JobID] = &jobCopy
		}
		r.pruneTrackedJobsLocked()
		r.jobsMu.Unlock()
	}
	// Keep only the newest maxActivityRecords entries.
	if len(activities) > maxActivityRecords {
		activities = activities[len(activities)-maxActivityRecords:]
	}
	if len(activities) > 0 {
		r.activitiesMu.Lock()
		r.activities = append([]JobActivity(nil), activities...)
		r.activitiesMu.Unlock()
	}
	return nil
}
// ListTrackedJobs returns copies of tracked jobs, optionally filtered by job
// type and (case-insensitive) state, sorted newest-first by UpdatedAt with
// JobID as tie-breaker, truncated to limit (0 means unlimited).
func (r *Plugin) ListTrackedJobs(jobType string, state string, limit int) []TrackedJob {
	r.jobsMu.RLock()
	defer r.jobsMu.RUnlock()
	wantType := strings.TrimSpace(jobType)
	wantState := strings.TrimSpace(strings.ToLower(state))
	matched := make([]TrackedJob, 0, len(r.jobs))
	for _, tracked := range r.jobs {
		if tracked == nil {
			continue
		}
		if wantType != "" && tracked.JobType != wantType {
			continue
		}
		if wantState != "" && strings.ToLower(tracked.State) != wantState {
			continue
		}
		matched = append(matched, cloneTrackedJob(*tracked))
	}
	updatedAt := func(job TrackedJob) time.Time {
		if job.UpdatedAt == nil {
			return time.Time{}
		}
		return *job.UpdatedAt
	}
	sort.Slice(matched, func(i, j int) bool {
		ti, tj := updatedAt(matched[i]), updatedAt(matched[j])
		if ti.Equal(tj) {
			return matched[i].JobID < matched[j].JobID
		}
		return ti.After(tj)
	})
	if limit > 0 && len(matched) > limit {
		matched = matched[:limit]
	}
	return matched
}
// GetTrackedJob returns a copy of the tracked job with the given ID and
// whether it exists.
func (r *Plugin) GetTrackedJob(jobID string) (*TrackedJob, bool) {
	r.jobsMu.RLock()
	defer r.jobsMu.RUnlock()
	if tracked, ok := r.jobs[jobID]; ok && tracked != nil {
		clone := cloneTrackedJob(*tracked)
		return &clone, true
	}
	return nil, false
}
// ListActivities returns activities, optionally filtered by job type, sorted
// newest-first and truncated to limit (0 means unlimited).
func (r *Plugin) ListActivities(jobType string, limit int) []JobActivity {
	wantType := strings.TrimSpace(jobType)
	r.activitiesMu.RLock()
	matched := make([]JobActivity, 0, len(r.activities))
	for _, entry := range r.activities {
		if wantType == "" || entry.JobType == wantType {
			matched = append(matched, entry)
		}
	}
	r.activitiesMu.RUnlock()
	sort.Slice(matched, func(i, j int) bool {
		return activityLess(matched[i], matched[j])
	})
	if limit > 0 && len(matched) > limit {
		matched = matched[:limit]
	}
	return matched
}
// ListJobActivities returns the activity timeline for one job, oldest-first,
// keeping only the newest limit entries (0 means unlimited). Returns nil for
// a blank job ID.
//
// The filtering/sorting/truncation logic is shared with BuildJobDetail via
// filterJobActivitiesFromSlice instead of being duplicated inline.
func (r *Plugin) ListJobActivities(jobID string, limit int) []JobActivity {
	normalizedJobID := strings.TrimSpace(jobID)
	if normalizedJobID == "" {
		return nil
	}
	// Snapshot under the read lock, then filter/sort outside it.
	r.activitiesMu.RLock()
	snapshot := append([]JobActivity(nil), r.activities...)
	r.activitiesMu.RUnlock()
	return filterJobActivitiesFromSlice(snapshot, normalizedJobID, limit)
}
// BuildJobDetail assembles the full detail view for a job: the merged
// live/persisted job record, its activity timeline, the matching run-history
// record (if any), and up to relatedLimit sibling jobs of the same type.
// It returns (nil, false, nil) when the job is unknown.
func (r *Plugin) BuildJobDetail(jobID string, activityLimit int, relatedLimit int) (*JobDetail, bool, error) {
	normalizedJobID := strings.TrimSpace(jobID)
	if normalizedJobID == "" {
		return nil, false, nil
	}
	// Clamp relatedLimit to a safe range to avoid excessive memory allocation from untrusted input.
	if relatedLimit <= 0 {
		relatedLimit = 0
	} else if relatedLimit > maxRelatedJobs {
		relatedLimit = maxRelatedJobs
	}
	r.jobsMu.RLock()
	trackedSnapshot, ok := r.jobs[normalizedJobID]
	if ok && trackedSnapshot != nil {
		candidate := cloneTrackedJob(*trackedSnapshot)
		stripTrackedJobDetailFields(&candidate)
		trackedSnapshot = &candidate
	} else {
		trackedSnapshot = nil
	}
	r.jobsMu.RUnlock()
	detailJob, err := r.store.LoadJobDetail(normalizedJobID)
	if err != nil {
		return nil, false, err
	}
	if trackedSnapshot == nil && detailJob == nil {
		return nil, false, nil
	}
	if detailJob == nil && trackedSnapshot != nil {
		clone := cloneTrackedJob(*trackedSnapshot)
		detailJob = &clone
	}
	if detailJob == nil {
		return nil, false, nil
	}
	if trackedSnapshot != nil {
		// Live status (state/progress/timestamps) fills gaps left by the
		// possibly stale on-disk detail record.
		mergeTrackedStatusIntoDetail(detailJob, trackedSnapshot)
	}
	detailJob.Parameters = enrichTrackedJobParameters(detailJob.JobType, detailJob.Parameters)
	r.activitiesMu.RLock()
	activities := append([]JobActivity(nil), r.activities...)
	r.activitiesMu.RUnlock()
	detail := &JobDetail{
		Job:         detailJob,
		Activities:  filterJobActivitiesFromSlice(activities, normalizedJobID, activityLimit),
		LastUpdated: timeToPtr(time.Now().UTC()),
	}
	if history, err := r.store.LoadRunHistory(detailJob.JobType); err != nil {
		return nil, true, err
	} else if history != nil {
		// Successful runs take precedence over error runs for the run record.
		for i := range history.SuccessfulRuns {
			record := history.SuccessfulRuns[i]
			if strings.TrimSpace(record.JobID) == normalizedJobID {
				recordCopy := record
				detail.RunRecord = &recordCopy
				break
			}
		}
		if detail.RunRecord == nil {
			for i := range history.ErrorRuns {
				record := history.ErrorRuns[i]
				if strings.TrimSpace(record.JobID) == normalizedJobID {
					recordCopy := record
					detail.RunRecord = &recordCopy
					break
				}
			}
		}
	}
	if relatedLimit > 0 {
		related := make([]TrackedJob, 0, relatedLimit)
		// Loop-invariant: hoisted out of the scan below.
		detailJobType := strings.TrimSpace(detailJob.JobType)
		r.jobsMu.RLock()
		for _, candidate := range r.jobs {
			// Nil guard, consistent with every other r.jobs iteration.
			if candidate == nil {
				continue
			}
			if strings.TrimSpace(candidate.JobType) != detailJobType {
				continue
			}
			if strings.TrimSpace(candidate.JobID) == normalizedJobID {
				continue
			}
			cloned := cloneTrackedJob(*candidate)
			stripTrackedJobDetailFields(&cloned)
			related = append(related, cloned)
			if len(related) >= relatedLimit {
				break
			}
		}
		r.jobsMu.RUnlock()
		detail.RelatedJobs = related
	}
	return detail, true, nil
}
// filterJobActivitiesFromSlice selects the activities belonging to jobID from
// all, ordered oldest-first (job timeline order), keeping only the newest
// limit entries (0 means unlimited). Returns nil for a blank jobID or empty
// input.
func filterJobActivitiesFromSlice(all []JobActivity, jobID string, limit int) []JobActivity {
	if strings.TrimSpace(jobID) == "" || len(all) == 0 {
		return nil
	}
	matched := make([]JobActivity, 0, len(all))
	for _, entry := range all {
		if strings.TrimSpace(entry.JobID) == jobID {
			matched = append(matched, entry)
		}
	}
	// Oldest-first for job timeline.
	sort.Slice(matched, func(i, j int) bool {
		return !activityLess(matched[i], matched[j])
	})
	if limit > 0 && len(matched) > limit {
		matched = matched[len(matched)-limit:]
	}
	return matched
}
// stripTrackedJobDetailFields clears the heavy detail payload fields so only
// lightweight status survives in the in-memory tracked-job map; the full
// payload lives in per-job detail files.
func stripTrackedJobDetailFields(job *TrackedJob) {
	if job == nil {
		return
	}
	job.Detail = ""
	job.Parameters, job.Labels, job.ResultOutputValues = nil, nil, nil
}
// hasTrackedJobRichDetails reports whether the job still carries any of the
// heavy detail payload fields (used to migrate legacy inline payloads into
// per-job detail files).
func hasTrackedJobRichDetails(job TrackedJob) bool {
	switch {
	case strings.TrimSpace(job.Detail) != "":
		return true
	case len(job.Parameters) > 0, len(job.Labels) > 0, len(job.ResultOutputValues) > 0:
		return true
	}
	return false
}
// mergeTrackedStatusIntoDetail copies fields from the live tracked record
// into the on-disk detail record, but only where the detail record has no
// value yet (empty string, zero number, or nil/zero timestamp).
func mergeTrackedStatusIntoDetail(detail *TrackedJob, tracked *TrackedJob) {
	if detail == nil || tracked == nil {
		return
	}
	fillString := func(dst *string, src string) {
		if *dst == "" {
			*dst = src
		}
	}
	fillTime := func(dst **time.Time, src *time.Time) {
		if *dst == nil || (*dst).IsZero() {
			*dst = src
		}
	}
	fillString(&detail.JobType, tracked.JobType)
	fillString(&detail.RequestID, tracked.RequestID)
	fillString(&detail.WorkerID, tracked.WorkerID)
	fillString(&detail.DedupeKey, tracked.DedupeKey)
	fillString(&detail.Summary, tracked.Summary)
	fillString(&detail.State, tracked.State)
	if detail.Progress == 0 {
		detail.Progress = tracked.Progress
	}
	fillString(&detail.Stage, tracked.Stage)
	fillString(&detail.Message, tracked.Message)
	if detail.Attempt == 0 {
		detail.Attempt = tracked.Attempt
	}
	fillTime(&detail.CreatedAt, tracked.CreatedAt)
	fillTime(&detail.UpdatedAt, tracked.UpdatedAt)
	fillTime(&detail.CompletedAt, tracked.CompletedAt)
	fillString(&detail.ErrorMessage, tracked.ErrorMessage)
	fillString(&detail.ResultSummary, tracked.ResultSummary)
}
// handleJobProgressUpdate folds a worker progress message into the tracked
// job map and the activity log. Updates without a JobId come from detection
// runs and only produce activity entries.
func (r *Plugin) handleJobProgressUpdate(workerID string, update *plugin_pb.JobProgressUpdate) {
	if update == nil {
		return
	}
	now := time.Now().UTC()
	resolvedWorkerID := strings.TrimSpace(workerID)
	if strings.TrimSpace(update.JobId) != "" {
		r.jobsMu.Lock()
		job := r.jobs[update.JobId]
		if job == nil {
			// First sighting of this job: create a tracking record on the fly.
			job = &TrackedJob{
				JobID: update.JobId,
				JobType: update.JobType,
				RequestID: update.RequestId,
				WorkerID: resolvedWorkerID,
				CreatedAt: timeToPtr(now),
			}
			r.jobs[update.JobId] = job
		}
		if update.JobType != "" {
			job.JobType = update.JobType
		}
		if update.RequestId != "" {
			job.RequestID = update.RequestId
		}
		// Prefer the worker already bound to the job; otherwise adopt the
		// sender of this update. The resolved ID is used for activities below.
		if job.WorkerID != "" {
			resolvedWorkerID = job.WorkerID
		} else if resolvedWorkerID != "" {
			job.WorkerID = resolvedWorkerID
		}
		job.State = strings.ToLower(update.State.String())
		job.Progress = update.ProgressPercent
		job.Stage = update.Stage
		job.Message = update.Message
		job.UpdatedAt = timeToPtr(now)
		r.pruneTrackedJobsLocked()
		r.dirtyJobs = true
		r.jobsMu.Unlock()
	}
	// Worker-emitted activity events are always recorded, job-bound or not.
	r.trackWorkerActivities(update.JobType, update.JobId, update.RequestId, resolvedWorkerID, update.Activities)
	if update.Message != "" || update.Stage != "" {
		// A missing job id marks this as detection progress, not job progress.
		source := "worker_progress"
		if strings.TrimSpace(update.JobId) == "" {
			source = "worker_detection"
		}
		r.appendActivity(JobActivity{
			JobID: update.JobId,
			JobType: update.JobType,
			RequestID: update.RequestId,
			WorkerID: resolvedWorkerID,
			Source: source,
			Message: update.Message,
			Stage: update.Stage,
			OccurredAt: timeToPtr(now),
		})
	}
}
// trackExecutionStart records that a job was assigned to a worker: it updates
// the in-memory tracked job, persists a detail snapshot (including the heavy
// payload fields that are stripped from the in-memory record), and appends an
// "assigned" activity.
func (r *Plugin) trackExecutionStart(requestID, workerID string, job *plugin_pb.JobSpec, attempt int32) {
	if job == nil || strings.TrimSpace(job.JobId) == "" {
		return
	}
	now := time.Now().UTC()
	r.jobsMu.Lock()
	tracked := r.jobs[job.JobId]
	if tracked == nil {
		tracked = &TrackedJob{
			JobID: job.JobId,
			CreatedAt: timeToPtr(now),
		}
		r.jobs[job.JobId] = tracked
	}
	tracked.JobType = job.JobType
	tracked.RequestID = requestID
	tracked.WorkerID = workerID
	tracked.DedupeKey = job.DedupeKey
	tracked.Summary = job.Summary
	tracked.State = strings.ToLower(plugin_pb.JobState_JOB_STATE_ASSIGNED.String())
	tracked.Progress = 0
	tracked.Stage = "assigned"
	tracked.Message = "job assigned to worker"
	tracked.Attempt = attempt
	if tracked.CreatedAt == nil || tracked.CreatedAt.IsZero() {
		tracked.CreatedAt = timeToPtr(now)
	}
	tracked.UpdatedAt = timeToPtr(now)
	// Snapshot under the lock so the detail write below sees consistent state.
	trackedSnapshot := cloneTrackedJob(*tracked)
	r.pruneTrackedJobsLocked()
	r.dirtyJobs = true
	r.jobsMu.Unlock()
	r.persistJobDetailSnapshot(job.JobId, func(detail *TrackedJob) {
		detail.JobID = job.JobId
		detail.JobType = job.JobType
		detail.RequestID = requestID
		detail.WorkerID = workerID
		detail.DedupeKey = job.DedupeKey
		detail.Summary = job.Summary
		detail.Detail = job.Detail
		detail.Parameters = enrichTrackedJobParameters(job.JobType, configValueMapToPlain(job.Parameters))
		if len(job.Labels) > 0 {
			// Copy labels so the detail record does not alias the proto map.
			labels := make(map[string]string, len(job.Labels))
			for key, value := range job.Labels {
				labels[key] = value
			}
			detail.Labels = labels
		} else {
			detail.Labels = nil
		}
		detail.State = trackedSnapshot.State
		detail.Progress = trackedSnapshot.Progress
		detail.Stage = trackedSnapshot.Stage
		detail.Message = trackedSnapshot.Message
		detail.Attempt = attempt
		if detail.CreatedAt == nil || detail.CreatedAt.IsZero() {
			detail.CreatedAt = trackedSnapshot.CreatedAt
		}
		detail.UpdatedAt = trackedSnapshot.UpdatedAt
	})
	r.appendActivity(JobActivity{
		JobID: job.JobId,
		JobType: job.JobType,
		RequestID: requestID,
		WorkerID: workerID,
		Source: "admin_dispatch",
		Message: "job assigned",
		Stage: "assigned",
		OccurredAt: timeToPtr(now),
	})
}
// trackExecutionQueued records that a job is waiting for an available
// executor: it marks the tracked job pending, persists a detail snapshot
// with the full payload, and appends a "queued" scheduler activity.
func (r *Plugin) trackExecutionQueued(job *plugin_pb.JobSpec) {
	if job == nil || strings.TrimSpace(job.JobId) == "" {
		return
	}
	now := time.Now().UTC()
	r.jobsMu.Lock()
	tracked := r.jobs[job.JobId]
	if tracked == nil {
		tracked = &TrackedJob{
			JobID: job.JobId,
			CreatedAt: timeToPtr(now),
		}
		r.jobs[job.JobId] = tracked
	}
	tracked.JobType = job.JobType
	tracked.DedupeKey = job.DedupeKey
	tracked.Summary = job.Summary
	tracked.State = strings.ToLower(plugin_pb.JobState_JOB_STATE_PENDING.String())
	tracked.Progress = 0
	tracked.Stage = "queued"
	tracked.Message = "waiting for available executor"
	if tracked.CreatedAt == nil || tracked.CreatedAt.IsZero() {
		tracked.CreatedAt = timeToPtr(now)
	}
	tracked.UpdatedAt = timeToPtr(now)
	// Snapshot under the lock so the detail write below sees consistent state.
	trackedSnapshot := cloneTrackedJob(*tracked)
	r.pruneTrackedJobsLocked()
	r.dirtyJobs = true
	r.jobsMu.Unlock()
	r.persistJobDetailSnapshot(job.JobId, func(detail *TrackedJob) {
		detail.JobID = job.JobId
		detail.JobType = job.JobType
		detail.DedupeKey = job.DedupeKey
		detail.Summary = job.Summary
		detail.Detail = job.Detail
		detail.Parameters = enrichTrackedJobParameters(job.JobType, configValueMapToPlain(job.Parameters))
		if len(job.Labels) > 0 {
			// Copy labels so the detail record does not alias the proto map.
			labels := make(map[string]string, len(job.Labels))
			for key, value := range job.Labels {
				labels[key] = value
			}
			detail.Labels = labels
		} else {
			detail.Labels = nil
		}
		detail.State = trackedSnapshot.State
		detail.Progress = trackedSnapshot.Progress
		detail.Stage = trackedSnapshot.Stage
		detail.Message = trackedSnapshot.Message
		if detail.CreatedAt == nil || detail.CreatedAt.IsZero() {
			detail.CreatedAt = trackedSnapshot.CreatedAt
		}
		detail.UpdatedAt = trackedSnapshot.UpdatedAt
	})
	r.appendActivity(JobActivity{
		JobID: job.JobId,
		JobType: job.JobType,
		Source: "admin_scheduler",
		Message: "job queued for execution",
		Stage: "queued",
		OccurredAt: timeToPtr(now),
	})
}
// trackExecutionCompletion folds a worker completion message into the tracked
// job (terminal state plus result or error), persists a detail snapshot,
// appends a completion activity, and returns a clone of the updated record.
// Returns nil when the message carries no job id.
func (r *Plugin) trackExecutionCompletion(completed *plugin_pb.JobCompleted) *TrackedJob {
	if completed == nil || strings.TrimSpace(completed.JobId) == "" {
		return nil
	}
	// Prefer the worker-reported completion time over local time.
	now := time.Now().UTC()
	if completed.CompletedAt != nil {
		now = completed.CompletedAt.AsTime().UTC()
	}
	r.jobsMu.Lock()
	tracked := r.jobs[completed.JobId]
	if tracked == nil {
		tracked = &TrackedJob{
			JobID: completed.JobId,
			CreatedAt: timeToPtr(now),
		}
		r.jobs[completed.JobId] = tracked
	}
	if completed.JobType != "" {
		tracked.JobType = completed.JobType
	}
	if completed.RequestId != "" {
		tracked.RequestID = completed.RequestId
	}
	if completed.Success {
		tracked.State = strings.ToLower(plugin_pb.JobState_JOB_STATE_SUCCEEDED.String())
		tracked.Progress = 100
		tracked.Stage = "completed"
		if completed.Result != nil {
			tracked.ResultSummary = completed.Result.Summary
		}
		tracked.Message = tracked.ResultSummary
		if tracked.Message == "" {
			tracked.Message = "completed"
		}
		// Success clears any error left over from earlier attempts.
		tracked.ErrorMessage = ""
	} else {
		tracked.State = strings.ToLower(plugin_pb.JobState_JOB_STATE_FAILED.String())
		tracked.Stage = "failed"
		tracked.ErrorMessage = completed.ErrorMessage
		tracked.Message = completed.ErrorMessage
	}
	tracked.UpdatedAt = timeToPtr(now)
	tracked.CompletedAt = timeToPtr(now)
	r.pruneTrackedJobsLocked()
	// Snapshot under the lock so the detail write and activity below see
	// consistent state.
	clone := cloneTrackedJob(*tracked)
	r.dirtyJobs = true
	r.jobsMu.Unlock()
	r.persistJobDetailSnapshot(completed.JobId, func(detail *TrackedJob) {
		detail.JobID = completed.JobId
		if completed.JobType != "" {
			detail.JobType = completed.JobType
		}
		if completed.RequestId != "" {
			detail.RequestID = completed.RequestId
		}
		detail.State = clone.State
		detail.Progress = clone.Progress
		detail.Stage = clone.Stage
		detail.Message = clone.Message
		detail.ErrorMessage = clone.ErrorMessage
		detail.ResultSummary = clone.ResultSummary
		if completed.Success && completed.Result != nil {
			detail.ResultOutputValues = configValueMapToPlain(completed.Result.OutputValues)
		} else {
			detail.ResultOutputValues = nil
		}
		if detail.CreatedAt == nil || detail.CreatedAt.IsZero() {
			detail.CreatedAt = clone.CreatedAt
		}
		if detail.UpdatedAt == nil || detail.UpdatedAt.IsZero() {
			detail.UpdatedAt = clone.UpdatedAt
		}
		if detail.CompletedAt == nil || detail.CompletedAt.IsZero() {
			detail.CompletedAt = clone.CompletedAt
		}
	})
	r.appendActivity(JobActivity{
		JobID: completed.JobId,
		JobType: completed.JobType,
		RequestID: completed.RequestId,
		WorkerID: clone.WorkerID,
		Source: "worker_completion",
		Message: clone.Message,
		Stage: clone.Stage,
		OccurredAt: timeToPtr(now),
	})
	return &clone
}
// trackWorkerActivities converts worker-reported activity events into
// JobActivity records, preferring each event's own timestamp when present.
func (r *Plugin) trackWorkerActivities(jobType, jobID, requestID, workerID string, events []*plugin_pb.ActivityEvent) {
	for _, event := range events {
		if event == nil {
			continue
		}
		occurredAt := time.Now().UTC()
		if event.CreatedAt != nil {
			occurredAt = event.CreatedAt.AsTime().UTC()
		}
		r.appendActivity(JobActivity{
			JobID:      jobID,
			JobType:    jobType,
			RequestID:  requestID,
			WorkerID:   workerID,
			Source:     strings.ToLower(event.Source.String()),
			Message:    event.Message,
			Stage:      event.Stage,
			Details:    configValueMapToPlain(event.Details),
			OccurredAt: timeToPtr(occurredAt),
		})
	}
}
// appendActivity records one activity entry, defaulting OccurredAt to now,
// trimming the in-memory log to maxActivityRecords, and marking the log dirty
// for the persistence loop.
func (r *Plugin) appendActivity(activity JobActivity) {
	if activity.OccurredAt == nil || activity.OccurredAt.IsZero() {
		now := time.Now().UTC()
		activity.OccurredAt = &now
	}
	r.activitiesMu.Lock()
	defer r.activitiesMu.Unlock()
	r.activities = append(r.activities, activity)
	if overflow := len(r.activities) - maxActivityRecords; overflow > 0 {
		r.activities = r.activities[overflow:]
	}
	r.dirtyActivities = true
}
// pruneTrackedJobsLocked evicts the stalest terminal (succeeded/failed/
// canceled) jobs once the map exceeds maxTrackedJobsTotal; non-terminal jobs
// are never evicted, so the map may stay above the cap when few jobs are
// terminal. Callers must hold jobsMu for writing.
func (r *Plugin) pruneTrackedJobsLocked() {
	if len(r.jobs) <= maxTrackedJobsTotal {
		return
	}
	type sortableJob struct {
		jobID     string
		updatedAt time.Time
	}
	terminalJobs := make([]sortableJob, 0, len(r.jobs))
	for jobID, job := range r.jobs {
		// Nil guard, consistent with the other r.jobs iterations.
		if job == nil {
			continue
		}
		switch job.State {
		case StateSucceeded, StateFailed, StateCanceled:
			updAt := time.Time{}
			if job.UpdatedAt != nil {
				updAt = *job.UpdatedAt
			}
			terminalJobs = append(terminalJobs, sortableJob{jobID, updAt})
		}
	}
	if len(terminalJobs) == 0 {
		return
	}
	// Oldest first, so the stalest terminal jobs are deleted.
	sort.Slice(terminalJobs, func(i, j int) bool {
		return terminalJobs[i].updatedAt.Before(terminalJobs[j].updatedAt)
	})
	// Positive by construction: len(r.jobs) > maxTrackedJobsTotal above.
	toDelete := len(r.jobs) - maxTrackedJobsTotal
	if toDelete > len(terminalJobs) {
		toDelete = len(terminalJobs)
	}
	for i := 0; i < toDelete; i++ {
		delete(r.jobs, terminalJobs[i].jobID)
	}
}
// configValueMapToPlain converts a protobuf ConfigValue map into plain JSON
// values by round-tripping through protojson with proto field names.
// Returns nil on empty input or any marshal/unmarshal failure.
func configValueMapToPlain(values map[string]*plugin_pb.ConfigValue) map[string]interface{} {
	if len(values) == 0 {
		return nil
	}
	wrapped := &plugin_pb.ValueMap{Fields: values}
	payload, err := protojson.MarshalOptions{UseProtoNames: true}.Marshal(wrapped)
	if err != nil {
		return nil
	}
	var decoded map[string]interface{}
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return nil
	}
	if fields, ok := decoded["fields"].(map[string]interface{}); ok {
		return fields
	}
	return nil
}
// persistTrackedJobsSnapshot writes the stripped in-memory job map to the
// store, newest-first, truncated to maxTrackedJobsTotal. The dirty flag is
// cleared up front so concurrent updates re-mark it for the next flush.
// NOTE(review): when the map is empty nothing is written, so a previously
// persisted non-empty set would remain on disk — confirm this is intended.
func (r *Plugin) persistTrackedJobsSnapshot() {
	r.jobsMu.Lock()
	r.dirtyJobs = false
	jobs := make([]TrackedJob, 0, len(r.jobs))
	for _, job := range r.jobs {
		if job == nil || strings.TrimSpace(job.JobID) == "" {
			continue
		}
		// Persist only lightweight status; heavy payloads live in per-job
		// detail files.
		clone := cloneTrackedJob(*job)
		stripTrackedJobDetailFields(&clone)
		jobs = append(jobs, clone)
	}
	r.jobsMu.Unlock()
	if len(jobs) == 0 {
		return
	}
	// Newest-first by UpdatedAt (nil treated as zero), JobID as tie-breaker.
	sort.Slice(jobs, func(i, j int) bool {
		ti := time.Time{}
		if jobs[i].UpdatedAt != nil {
			ti = *jobs[i].UpdatedAt
		}
		tj := time.Time{}
		if jobs[j].UpdatedAt != nil {
			tj = *jobs[j].UpdatedAt
		}
		if !ti.Equal(tj) {
			return ti.After(tj)
		}
		return jobs[i].JobID < jobs[j].JobID
	})
	if len(jobs) > maxTrackedJobsTotal {
		jobs = jobs[:maxTrackedJobsTotal]
	}
	if err := r.store.SaveTrackedJobs(jobs); err != nil {
		glog.Warningf("Plugin failed to persist tracked jobs: %v", err)
	}
}
// persistJobDetailSnapshot loads the on-disk detail record for jobID
// (creating an empty one when absent), lets apply mutate it, and writes it
// back. jobDetailsMu serializes the read-modify-write cycle.
func (r *Plugin) persistJobDetailSnapshot(jobID string, apply func(detail *TrackedJob)) {
	normalizedJobID, _ := sanitizeJobID(jobID)
	if normalizedJobID == "" {
		return
	}
	r.jobDetailsMu.Lock()
	defer r.jobDetailsMu.Unlock()
	detail, loadErr := r.store.LoadJobDetail(normalizedJobID)
	if loadErr != nil {
		glog.Warningf("Plugin failed to load job detail snapshot for %s: %v", normalizedJobID, loadErr)
		return
	}
	if detail == nil {
		detail = &TrackedJob{JobID: normalizedJobID}
	}
	if apply != nil {
		apply(detail)
	}
	if saveErr := r.store.SaveJobDetail(*detail); saveErr != nil {
		glog.Warningf("Plugin failed to persist job detail snapshot for %s: %v", normalizedJobID, saveErr)
	}
}
// persistActivitiesSnapshot writes the current activity log to the store,
// capped at maxActivityRecords. The dirty flag is cleared up front so
// concurrent appends re-mark it for the next flush.
func (r *Plugin) persistActivitiesSnapshot() {
	r.activitiesMu.Lock()
	r.dirtyActivities = false
	snapshot := append([]JobActivity(nil), r.activities...)
	r.activitiesMu.Unlock()
	if len(snapshot) == 0 {
		return
	}
	if overflow := len(snapshot) - maxActivityRecords; overflow > 0 {
		snapshot = snapshot[overflow:]
	}
	if err := r.store.SaveActivities(snapshot); err != nil {
		glog.Warningf("Plugin failed to persist activities: %v", err)
	}
}
// persistenceLoop periodically flushes dirty jobs and activities to the store
// and performs a final unconditional flush on shutdown. It runs as a
// goroutine tracked by r.wg and exits when shutdownCh is closed.
func (r *Plugin) persistenceLoop() {
	defer r.wg.Done()
	for {
		select {
		case <-r.shutdownCh:
			// Final flush regardless of dirty flags, then terminate.
			r.persistTrackedJobsSnapshot()
			r.persistActivitiesSnapshot()
			return
		case <-r.persistTicker.C:
			// Check each dirty flag under its own read lock; the snapshot
			// functions re-clear the flags under write locks.
			r.jobsMu.RLock()
			needsJobsFlush := r.dirtyJobs
			r.jobsMu.RUnlock()
			if needsJobsFlush {
				r.persistTrackedJobsSnapshot()
			}
			r.activitiesMu.RLock()
			needsActivitiesFlush := r.dirtyActivities
			r.activitiesMu.RUnlock()
			if needsActivitiesFlush {
				r.persistActivitiesSnapshot()
			}
		}
	}
}

View File

@@ -0,0 +1,600 @@
package plugin
import (
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// TestPluginLoadsPersistedMonitorStateOnStart seeds the config store with one
// tracked job and one activity, then verifies that New restores both into the
// in-memory monitor state.
func TestPluginLoadsPersistedMonitorStateOnStart(t *testing.T) {
	t.Parallel()
	dataDir := t.TempDir()
	store, err := NewConfigStore(dataDir)
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	seedJobs := []TrackedJob{
		{
			JobID: "job-seeded",
			JobType: "vacuum",
			State: "running",
			CreatedAt: timeToPtr(time.Now().UTC().Add(-2 * time.Minute)),
			UpdatedAt: timeToPtr(time.Now().UTC().Add(-1 * time.Minute)),
		},
	}
	seedActivities := []JobActivity{
		{
			JobID: "job-seeded",
			JobType: "vacuum",
			Source: "worker_progress",
			Message: "seeded",
			OccurredAt: timeToPtr(time.Now().UTC().Add(-30 * time.Second)),
		},
	}
	if err := store.SaveTrackedJobs(seedJobs); err != nil {
		t.Fatalf("SaveTrackedJobs: %v", err)
	}
	if err := store.SaveActivities(seedActivities); err != nil {
		t.Fatalf("SaveActivities: %v", err)
	}
	// New should load the persisted snapshot from the same data dir.
	pluginSvc, err := New(Options{DataDir: dataDir})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	gotJobs := pluginSvc.ListTrackedJobs("", "", 0)
	if len(gotJobs) != 1 || gotJobs[0].JobID != "job-seeded" {
		t.Fatalf("unexpected loaded jobs: %+v", gotJobs)
	}
	gotActivities := pluginSvc.ListActivities("", 0)
	if len(gotActivities) != 1 || gotActivities[0].Message != "seeded" {
		t.Fatalf("unexpected loaded activities: %+v", gotActivities)
	}
}
// TestPluginPersistsMonitorStateAfterJobUpdates drives a job through
// assignment and completion, shuts the plugin down to force a flush, then
// reopens the store and checks the job and activities were persisted.
func TestPluginPersistsMonitorStateAfterJobUpdates(t *testing.T) {
	t.Parallel()
	dataDir := t.TempDir()
	pluginSvc, err := New(Options{DataDir: dataDir})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	job := &plugin_pb.JobSpec{
		JobId: "job-persist",
		JobType: "vacuum",
		Summary: "persist test",
	}
	pluginSvc.trackExecutionStart("req-persist", "worker-a", job, 1)
	pluginSvc.trackExecutionCompletion(&plugin_pb.JobCompleted{
		RequestId: "req-persist",
		JobId: "job-persist",
		JobType: "vacuum",
		Success: true,
		Result: &plugin_pb.JobResult{Summary: "done"},
		CompletedAt: timestamppb.New(time.Now().UTC()),
	})
	// Explicit shutdown flushes monitor state before the store is reopened.
	pluginSvc.Shutdown()
	store, err := NewConfigStore(dataDir)
	if err != nil {
		t.Fatalf("NewConfigStore: %v", err)
	}
	trackedJobs, err := store.LoadTrackedJobs()
	if err != nil {
		t.Fatalf("LoadTrackedJobs: %v", err)
	}
	if len(trackedJobs) == 0 {
		t.Fatalf("expected persisted tracked jobs")
	}
	found := false
	for _, tracked := range trackedJobs {
		if tracked.JobID == "job-persist" {
			found = true
			if tracked.State == "" {
				t.Fatalf("persisted job state should not be empty")
			}
		}
	}
	if !found {
		t.Fatalf("persisted tracked jobs missing job-persist")
	}
	activities, err := store.LoadActivities()
	if err != nil {
		t.Fatalf("LoadActivities: %v", err)
	}
	if len(activities) == 0 {
		t.Fatalf("expected persisted activities")
	}
}
// TestTrackExecutionQueuedMarksPendingState verifies that queueing a job spec
// records a pending tracked job and a matching "queued" scheduler activity.
func TestTrackExecutionQueuedMarksPendingState(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	spec := &plugin_pb.JobSpec{
		JobId:     "job-pending-1",
		JobType:   "vacuum",
		DedupeKey: "vacuum:1",
		Summary:   "pending queue item",
	}
	pluginSvc.trackExecutionQueued(spec)
	tracked := pluginSvc.ListTrackedJobs("vacuum", "", 10)
	if len(tracked) != 1 {
		t.Fatalf("expected one tracked pending job, got=%d", len(tracked))
	}
	pending := tracked[0]
	switch {
	case pending.JobID != "job-pending-1":
		t.Fatalf("unexpected pending job id: %s", pending.JobID)
	case pending.State != "job_state_pending":
		t.Fatalf("unexpected pending job state: %s", pending.State)
	case pending.Stage != "queued":
		t.Fatalf("unexpected pending job stage: %s", pending.Stage)
	}
	queuedSeen := false
	for _, entry := range pluginSvc.ListActivities("vacuum", 50) {
		if entry.JobID == "job-pending-1" && entry.Stage == "queued" && entry.Source == "admin_scheduler" {
			queuedSeen = true
			break
		}
	}
	if !queuedSeen {
		t.Fatalf("expected queued activity for pending job")
	}
}
// TestHandleJobProgressUpdateCarriesWorkerIDInActivities verifies that both
// the synthesized worker_progress activity and worker-reported activity
// events carry the sending worker's ID.
func TestHandleJobProgressUpdateCarriesWorkerIDInActivities(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	job := &plugin_pb.JobSpec{
		JobId: "job-progress-worker",
		JobType: "vacuum",
	}
	pluginSvc.trackExecutionStart("req-progress-worker", "worker-a", job, 1)
	pluginSvc.handleJobProgressUpdate("worker-a", &plugin_pb.JobProgressUpdate{
		RequestId: "req-progress-worker",
		JobId: "job-progress-worker",
		JobType: "vacuum",
		State: plugin_pb.JobState_JOB_STATE_RUNNING,
		ProgressPercent: 42.0,
		Stage: "scan",
		Message: "in progress",
		Activities: []*plugin_pb.ActivityEvent{
			{
				Source: plugin_pb.ActivitySource_ACTIVITY_SOURCE_EXECUTOR,
				Message: "volume scanned",
				Stage: "scan",
			},
		},
	})
	activities := pluginSvc.ListActivities("vacuum", 0)
	if len(activities) == 0 {
		t.Fatalf("expected activity entries")
	}
	// Expect both the synthesized progress activity and the embedded event.
	foundProgress := false
	foundEvent := false
	for _, activity := range activities {
		if activity.Source == "worker_progress" && activity.Message == "in progress" {
			foundProgress = true
			if activity.WorkerID != "worker-a" {
				t.Fatalf("worker_progress activity worker mismatch: got=%q want=%q", activity.WorkerID, "worker-a")
			}
		}
		if activity.Message == "volume scanned" {
			foundEvent = true
			if activity.WorkerID != "worker-a" {
				t.Fatalf("worker event worker mismatch: got=%q want=%q", activity.WorkerID, "worker-a")
			}
		}
	}
	if !foundProgress {
		t.Fatalf("expected worker_progress activity")
	}
	if !foundEvent {
		t.Fatalf("expected worker activity event")
	}
}
// TestHandleJobProgressUpdateWithoutJobIDTracksDetectionActivities verifies
// that progress updates without a JobId (detection runs) are recorded as
// worker_detection activities and that detector events keep the worker ID.
func TestHandleJobProgressUpdateWithoutJobIDTracksDetectionActivities(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	// No JobId: this update represents detection progress, not job progress.
	pluginSvc.handleJobProgressUpdate("worker-detector", &plugin_pb.JobProgressUpdate{
		RequestId: "detect-req-1",
		JobType: "vacuum",
		State: plugin_pb.JobState_JOB_STATE_RUNNING,
		Stage: "decision_summary",
		Message: "VACUUM: No tasks created for 3 volumes",
		Activities: []*plugin_pb.ActivityEvent{
			{
				Source: plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR,
				Stage: "decision_summary",
				Message: "VACUUM: No tasks created for 3 volumes",
			},
		},
	})
	activities := pluginSvc.ListActivities("vacuum", 0)
	if len(activities) == 0 {
		t.Fatalf("expected activity entries")
	}
	foundDetectionProgress := false
	foundDetectorEvent := false
	for _, activity := range activities {
		if activity.RequestID != "detect-req-1" {
			continue
		}
		if activity.Source == "worker_detection" {
			foundDetectionProgress = true
			if activity.WorkerID != "worker-detector" {
				t.Fatalf("worker_detection worker mismatch: got=%q want=%q", activity.WorkerID, "worker-detector")
			}
		}
		if activity.Source == "activity_source_detector" {
			foundDetectorEvent = true
			if activity.WorkerID != "worker-detector" {
				t.Fatalf("detector event worker mismatch: got=%q want=%q", activity.WorkerID, "worker-detector")
			}
		}
	}
	if !foundDetectionProgress {
		t.Fatalf("expected worker_detection activity")
	}
	if !foundDetectorEvent {
		t.Fatalf("expected detector activity event")
	}
}
// TestHandleJobCompletedCarriesWorkerIDInActivitiesAndRunHistory verifies
// that completion-embedded activity events and the persisted run history both
// carry the worker that executed the job.
func TestHandleJobCompletedCarriesWorkerIDInActivitiesAndRunHistory(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	job := &plugin_pb.JobSpec{
		JobId: "job-complete-worker",
		JobType: "vacuum",
	}
	pluginSvc.trackExecutionStart("req-complete-worker", "worker-b", job, 1)
	pluginSvc.handleJobCompleted(&plugin_pb.JobCompleted{
		RequestId: "req-complete-worker",
		JobId: "job-complete-worker",
		JobType: "vacuum",
		Success: true,
		Activities: []*plugin_pb.ActivityEvent{
			{
				Source: plugin_pb.ActivitySource_ACTIVITY_SOURCE_EXECUTOR,
				Message: "finalizer done",
				Stage: "finalize",
			},
		},
		CompletedAt: timestamppb.Now(),
	})
	// Explicit shutdown flushes state before run history is read back.
	pluginSvc.Shutdown()
	activities := pluginSvc.ListActivities("vacuum", 0)
	foundWorkerEvent := false
	for _, activity := range activities {
		if activity.Message == "finalizer done" {
			foundWorkerEvent = true
			if activity.WorkerID != "worker-b" {
				t.Fatalf("worker completion event worker mismatch: got=%q want=%q", activity.WorkerID, "worker-b")
			}
		}
	}
	if !foundWorkerEvent {
		t.Fatalf("expected completion worker event activity")
	}
	history, err := pluginSvc.LoadRunHistory("vacuum")
	if err != nil {
		t.Fatalf("LoadRunHistory: %v", err)
	}
	if history == nil || len(history.SuccessfulRuns) == 0 {
		t.Fatalf("expected successful run history entry")
	}
	if history.SuccessfulRuns[0].WorkerID != "worker-b" {
		t.Fatalf("run history worker mismatch: got=%q want=%q", history.SuccessfulRuns[0].WorkerID, "worker-b")
	}
}
// TestTrackExecutionStartStoresJobPayloadDetails verifies the split storage
// model: heavy payload fields (detail, labels, parameters) are stripped from
// the in-memory tracked job but remain available via the disk-backed
// BuildJobDetail view.
func TestTrackExecutionStartStoresJobPayloadDetails(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{DataDir: t.TempDir()})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	pluginSvc.trackExecutionStart("req-payload", "worker-c", &plugin_pb.JobSpec{
		JobId: "job-payload",
		JobType: "vacuum",
		Summary: "payload summary",
		Detail: "payload detail",
		Parameters: map[string]*plugin_pb.ConfigValue{
			"volume_id": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 9},
			},
		},
		Labels: map[string]string{
			"source": "detector",
		},
	}, 2)
	// Explicit shutdown flushes the detail snapshot before it is read back.
	pluginSvc.Shutdown()
	job, found := pluginSvc.GetTrackedJob("job-payload")
	if !found || job == nil {
		t.Fatalf("expected tracked job")
	}
	// In-memory record must carry only lightweight status fields.
	if job.Detail != "" {
		t.Fatalf("expected in-memory tracked job detail to be stripped, got=%q", job.Detail)
	}
	if job.Attempt != 2 {
		t.Fatalf("unexpected attempt: %d", job.Attempt)
	}
	if len(job.Labels) != 0 {
		t.Fatalf("expected in-memory labels to be stripped, got=%+v", job.Labels)
	}
	if len(job.Parameters) != 0 {
		t.Fatalf("expected in-memory parameters to be stripped, got=%+v", job.Parameters)
	}
	detail, found, err := pluginSvc.BuildJobDetail("job-payload", 100, 0)
	if err != nil {
		t.Fatalf("BuildJobDetail: %v", err)
	}
	if !found || detail == nil || detail.Job == nil {
		t.Fatalf("expected disk-backed job detail")
	}
	if detail.Job.Detail != "payload detail" {
		t.Fatalf("unexpected disk-backed detail: %q", detail.Job.Detail)
	}
	if got := detail.Job.Labels["source"]; got != "detector" {
		t.Fatalf("unexpected disk-backed label source: %q", got)
	}
	// protojson renders int64 values as strings, hence the "9" comparison.
	if got, ok := detail.Job.Parameters["volume_id"].(map[string]interface{}); !ok || got["int64_value"] != "9" {
		t.Fatalf("unexpected disk-backed parameters payload: %#v", detail.Job.Parameters["volume_id"])
	}
}
// TestTrackExecutionStartStoresErasureCodingExecutionPlan verifies that when
// a job carries marshaled worker_pb.TaskParams under the "task_params_pb"
// parameter, BuildJobDetail derives a structured "execution_plan" entry:
// job type, volume id, the two targets, and one shard assignment per shard id
// (2 targets x 2 shards = 4 assignments; shard 0 of a 10+4 layout is "data").
func TestTrackExecutionStartStoresErasureCodingExecutionPlan(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{DataDir: t.TempDir()})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	// One source, two targets, each target taking two shards of volume 29.
	taskParams := &worker_pb.TaskParams{
		TaskId:     "task-ec-1",
		VolumeId:   29,
		Collection: "photos",
		Sources: []*worker_pb.TaskSource{
			{
				Node:       "source-a:8080",
				DataCenter: "dc1",
				Rack:       "rack1",
				VolumeId:   29,
			},
		},
		Targets: []*worker_pb.TaskTarget{
			{
				Node:       "target-a:8080",
				DataCenter: "dc1",
				Rack:       "rack2",
				VolumeId:   29,
				ShardIds:   []uint32{0, 10},
			},
			{
				Node:       "target-b:8080",
				DataCenter: "dc2",
				Rack:       "rack3",
				VolumeId:   29,
				ShardIds:   []uint32{1, 11},
			},
		},
		TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
			ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
				DataShards:   10,
				ParityShards: 4,
			},
		},
	}
	payload, err := proto.Marshal(taskParams)
	if err != nil {
		t.Fatalf("Marshal task params: %v", err)
	}
	pluginSvc.trackExecutionStart("req-ec-plan", "worker-ec", &plugin_pb.JobSpec{
		JobId:   "job-ec-plan",
		JobType: "erasure_coding",
		Parameters: map[string]*plugin_pb.ConfigValue{
			"task_params_pb": {
				Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: payload},
			},
		},
	}, 1)
	// Flush to disk so BuildJobDetail reads the persisted record.
	pluginSvc.Shutdown()
	detail, found, err := pluginSvc.BuildJobDetail("job-ec-plan", 100, 0)
	if err != nil {
		t.Fatalf("BuildJobDetail: %v", err)
	}
	if !found || detail == nil || detail.Job == nil {
		t.Fatalf("expected disk-backed detail")
	}
	rawPlan, ok := detail.Job.Parameters["execution_plan"]
	if !ok {
		t.Fatalf("expected execution_plan in parameters, got=%+v", detail.Job.Parameters)
	}
	plan, ok := rawPlan.(map[string]interface{})
	if !ok {
		t.Fatalf("unexpected execution_plan type: %T", rawPlan)
	}
	if plan["job_type"] != "erasure_coding" {
		t.Fatalf("unexpected execution plan job type: %+v", plan["job_type"])
	}
	// JSON round-trip decodes numbers as float64.
	if plan["volume_id"] != float64(29) {
		t.Fatalf("unexpected execution plan volume id: %+v", plan["volume_id"])
	}
	targets, ok := plan["targets"].([]interface{})
	if !ok || len(targets) != 2 {
		t.Fatalf("unexpected targets in execution plan: %+v", plan["targets"])
	}
	assignments, ok := plan["shard_assignments"].([]interface{})
	if !ok || len(assignments) != 4 {
		t.Fatalf("unexpected shard assignments in execution plan: %+v", plan["shard_assignments"])
	}
	firstAssignment, ok := assignments[0].(map[string]interface{})
	if !ok {
		t.Fatalf("unexpected first assignment payload: %+v", assignments[0])
	}
	if firstAssignment["shard_id"] != float64(0) || firstAssignment["kind"] != "data" {
		t.Fatalf("unexpected first assignment: %+v", firstAssignment)
	}
}
// TestBuildJobDetailIncludesActivitiesAndRunRecord drives a job through a
// full start -> progress -> completed lifecycle and checks that BuildJobDetail
// aggregates the tracked job, its run record, an activity timeline, and the
// result output values into one detail payload.
func TestBuildJobDetailIncludesActivitiesAndRunRecord(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{DataDir: t.TempDir()})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	pluginSvc.trackExecutionStart("req-detail", "worker-z", &plugin_pb.JobSpec{
		JobId:   "job-detail",
		JobType: "vacuum",
		Summary: "detail summary",
	}, 1)
	// Mid-flight progress update contributes to the activity timeline.
	pluginSvc.handleJobProgressUpdate("worker-z", &plugin_pb.JobProgressUpdate{
		RequestId: "req-detail",
		JobId:     "job-detail",
		JobType:   "vacuum",
		State:     plugin_pb.JobState_JOB_STATE_RUNNING,
		Stage:     "scan",
		Message:   "scanning volume",
	})
	// Successful completion with one output value.
	pluginSvc.handleJobCompleted(&plugin_pb.JobCompleted{
		RequestId: "req-detail",
		JobId:     "job-detail",
		JobType:   "vacuum",
		Success:   true,
		Result: &plugin_pb.JobResult{
			Summary: "done",
			OutputValues: map[string]*plugin_pb.ConfigValue{
				"affected": {
					Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 1},
				},
			},
		},
		CompletedAt: timestamppb.Now(),
	})
	// Flush persisted state before reading the detail view.
	pluginSvc.Shutdown()
	detail, found, err := pluginSvc.BuildJobDetail("job-detail", 100, 5)
	if err != nil {
		t.Fatalf("BuildJobDetail error: %v", err)
	}
	if !found || detail == nil {
		t.Fatalf("expected job detail")
	}
	if detail.Job == nil || detail.Job.JobID != "job-detail" {
		t.Fatalf("unexpected job detail payload: %+v", detail.Job)
	}
	if detail.RunRecord == nil || detail.RunRecord.JobID != "job-detail" {
		t.Fatalf("expected run record for job-detail, got=%+v", detail.RunRecord)
	}
	if len(detail.Activities) == 0 {
		t.Fatalf("expected activity timeline entries")
	}
	if detail.Job.ResultOutputValues == nil {
		t.Fatalf("expected result output values")
	}
}
// TestBuildJobDetailLoadsFromDiskWhenMemoryCleared wipes the in-memory job
// and activity caches after a shutdown flush and verifies BuildJobDetail
// still recovers the full detail payload from the persisted copy on disk.
func TestBuildJobDetailLoadsFromDiskWhenMemoryCleared(t *testing.T) {
	t.Parallel()
	svc, err := New(Options{DataDir: t.TempDir()})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer svc.Shutdown()
	svc.trackExecutionStart("req-disk", "worker-d", &plugin_pb.JobSpec{
		JobId:   "job-disk",
		JobType: "vacuum",
		Summary: "disk summary",
		Detail:  "disk detail payload",
	}, 1)
	// Presumably flushes state to disk; then erase the in-memory caches so
	// the lookup below can only succeed via the disk-backed path.
	svc.Shutdown()
	svc.jobsMu.Lock()
	svc.jobs = map[string]*TrackedJob{}
	svc.jobsMu.Unlock()
	svc.activitiesMu.Lock()
	svc.activities = nil
	svc.activitiesMu.Unlock()
	detail, found, err := svc.BuildJobDetail("job-disk", 100, 0)
	if err != nil {
		t.Fatalf("BuildJobDetail: %v", err)
	}
	if !found || detail == nil || detail.Job == nil {
		t.Fatalf("expected detail from disk")
	}
	if detail.Job.Detail != "disk detail payload" {
		t.Fatalf("unexpected disk detail payload: %q", detail.Job.Detail)
	}
}

View File

@@ -0,0 +1,945 @@
package plugin
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/protobuf/types/known/timestamppb"
)
// errExecutorAtCapacity signals that the chosen executor cannot accept
// another job right now; the dispatch loop reacts by re-queueing the job and
// retrying after a backoff instead of counting it as a failure.
var errExecutorAtCapacity = errors.New("executor is at capacity")
// Fallback tuning values for the scheduler. loadSchedulerPolicy substitutes
// these whenever the persisted admin runtime config (or the descriptor's
// defaults) leaves a field unset or out of range.
const (
	// defaultSchedulerTick presumably seeds r.schedulerTick used by
	// schedulerLoop — confirm where Options wires it.
	defaultSchedulerTick                       = 5 * time.Second
	defaultScheduledDetectionInterval          = 300 * time.Second
	defaultScheduledDetectionTimeout           = 45 * time.Second
	defaultScheduledExecutionTimeout           = 90 * time.Second
	defaultScheduledMaxResults           int32 = 1000 // cap on proposals per detection run
	defaultScheduledExecutionConcurrency       = 1
	defaultScheduledPerWorkerConcurrency       = 1
	maxScheduledExecutionConcurrency           = 128 // hard ceiling regardless of config
	defaultScheduledRetryBackoff               = 5 * time.Second
	defaultClusterContextTimeout               = 10 * time.Second
	// Waiting-backlog guard: detection is skipped once waiting jobs reach
	// concurrency*multiplier, floored at the value below (see
	// waitingBacklogThreshold).
	defaultWaitingBacklogFloor      = 8
	defaultWaitingBacklogMultiplier = 4
)
// schedulerPolicy is the resolved, clamped set of scheduling knobs for one
// job type, derived from the persisted admin config or descriptor defaults by
// loadSchedulerPolicy.
type schedulerPolicy struct {
	DetectionInterval      time.Duration // minimum spacing between detection runs
	DetectionTimeout       time.Duration // context timeout for a single RunDetection call
	ExecutionTimeout       time.Duration // context timeout per execution attempt (also bounds reservation waits)
	RetryBackoff           time.Duration // wait between failed execution attempts
	MaxResults             int32         // cap on proposals requested per detection
	ExecutionConcurrency   int           // number of concurrent dispatch workers
	PerWorkerConcurrency   int           // per-executor reservation limit
	RetryLimit             int           // extra attempts after the first failure
	ExecutorReserveBackoff time.Duration // pause between executor reservation retries
}
// schedulerLoop drives periodic scheduling: one immediate pass at startup,
// then one pass per tick until shutdown is signaled.
func (r *Plugin) schedulerLoop() {
	defer r.wg.Done()
	ticker := time.NewTicker(r.schedulerTick)
	defer ticker.Stop()
	r.runSchedulerTick() // do not wait a full tick before the first pass
	for {
		select {
		case <-ticker.C:
			r.runSchedulerTick()
		case <-r.shutdownCh:
			return
		}
	}
}
// runSchedulerTick performs one scheduling pass: for every detectable job
// type it resolves the policy, clears state for disabled types, and launches
// an asynchronous detection for each type whose interval has elapsed.
// Afterwards it prunes scheduler state and detector leases for job types that
// are no longer detectable.
func (r *Plugin) runSchedulerTick() {
	detectable := r.registry.DetectableJobTypes()
	if len(detectable) == 0 {
		return
	}
	activeSet := make(map[string]struct{}, len(detectable))
	for _, candidate := range detectable {
		activeSet[candidate] = struct{}{}
		policy, enabled, err := r.loadSchedulerPolicy(candidate)
		switch {
		case err != nil:
			glog.Warningf("Plugin scheduler failed to load policy for %s: %v", candidate, err)
		case !enabled:
			r.clearSchedulerJobType(candidate)
		case r.markDetectionDue(candidate, policy.DetectionInterval):
			// Detection runs asynchronously; the WaitGroup lets Shutdown
			// wait for it.
			r.wg.Add(1)
			go func(jt string, p schedulerPolicy) {
				defer r.wg.Done()
				r.runScheduledDetection(jt, p)
			}(candidate, policy)
		}
	}
	r.pruneSchedulerState(activeSet)
	r.pruneDetectorLeases(activeSet)
}
// loadSchedulerPolicy resolves the effective scheduler policy for jobType.
// It returns (policy, true, nil) when scheduling is enabled, a zero policy
// with false when no admin runtime settings exist or scheduling is disabled,
// and an error when the backing store cannot be read.
func (r *Plugin) loadSchedulerPolicy(jobType string) (schedulerPolicy, bool, error) {
	cfg, err := r.store.LoadJobTypeConfig(jobType)
	if err != nil {
		return schedulerPolicy{}, false, err
	}
	descriptor, err := r.store.LoadDescriptor(jobType)
	if err != nil {
		return schedulerPolicy{}, false, err
	}
	adminRuntime := deriveSchedulerAdminRuntime(cfg, descriptor)
	if adminRuntime == nil || !adminRuntime.Enabled {
		return schedulerPolicy{}, false, nil
	}
	policy := schedulerPolicy{
		DetectionInterval:      durationFromSeconds(adminRuntime.DetectionIntervalSeconds, defaultScheduledDetectionInterval),
		DetectionTimeout:       durationFromSeconds(adminRuntime.DetectionTimeoutSeconds, defaultScheduledDetectionTimeout),
		ExecutionTimeout:       defaultScheduledExecutionTimeout,
		RetryBackoff:           durationFromSeconds(adminRuntime.RetryBackoffSeconds, defaultScheduledRetryBackoff),
		MaxResults:             adminRuntime.MaxJobsPerDetection,
		ExecutionConcurrency:   int(adminRuntime.GlobalExecutionConcurrency),
		PerWorkerConcurrency:   int(adminRuntime.PerWorkerExecutionConcurrency),
		RetryLimit:             int(adminRuntime.RetryLimit),
		ExecutorReserveBackoff: 200 * time.Millisecond,
	}
	// Clamp everything to sane bounds.
	if policy.DetectionInterval < r.schedulerTick {
		policy.DetectionInterval = r.schedulerTick
	}
	if policy.MaxResults <= 0 {
		policy.MaxResults = defaultScheduledMaxResults
	}
	if policy.ExecutionConcurrency <= 0 {
		policy.ExecutionConcurrency = defaultScheduledExecutionConcurrency
	}
	if policy.ExecutionConcurrency > maxScheduledExecutionConcurrency {
		policy.ExecutionConcurrency = maxScheduledExecutionConcurrency
	}
	if policy.PerWorkerConcurrency <= 0 {
		policy.PerWorkerConcurrency = defaultScheduledPerWorkerConcurrency
	}
	if policy.PerWorkerConcurrency > policy.ExecutionConcurrency {
		policy.PerWorkerConcurrency = policy.ExecutionConcurrency
	}
	if policy.RetryLimit < 0 {
		policy.RetryLimit = 0
	}
	// Plugin protocol currently has only detection timeout in admin settings,
	// so the execution timeout is derived as twice the detection timeout,
	// floored at the default. Widen to time.Duration BEFORE multiplying: the
	// previous int32 multiplication (DetectionTimeoutSeconds*2) could
	// overflow for very large configured timeouts before the conversion.
	execTimeout := time.Duration(adminRuntime.DetectionTimeoutSeconds) * 2 * time.Second
	if execTimeout < defaultScheduledExecutionTimeout {
		execTimeout = defaultScheduledExecutionTimeout
	}
	policy.ExecutionTimeout = execTimeout
	return policy, true, nil
}
// ListSchedulerStates returns a monitoring snapshot for every known job type:
// scheduling enablement and effective policy values, the next detection time,
// whether a detection is in flight, the leased or pickable detector worker,
// and the count of eligible executors.
func (r *Plugin) ListSchedulerStates() ([]SchedulerJobTypeState, error) {
	jobTypes, err := r.ListKnownJobTypes()
	if err != nil {
		return nil, err
	}
	// Snapshot the scheduler maps under the lock so the per-job-type loop
	// below runs lock-free.
	r.schedulerMu.Lock()
	nextDetectionAt := make(map[string]time.Time, len(r.nextDetectionAt))
	for jobType, nextRun := range r.nextDetectionAt {
		nextDetectionAt[jobType] = nextRun
	}
	detectionInFlight := make(map[string]bool, len(r.detectionInFlight))
	for jobType, inFlight := range r.detectionInFlight {
		detectionInFlight[jobType] = inFlight
	}
	r.schedulerMu.Unlock()
	states := make([]SchedulerJobTypeState, 0, len(jobTypes))
	for _, jobType := range jobTypes {
		state := SchedulerJobTypeState{
			JobType:           jobType,
			DetectionInFlight: detectionInFlight[jobType],
		}
		if nextRun, ok := nextDetectionAt[jobType]; ok && !nextRun.IsZero() {
			nextRunUTC := nextRun.UTC()
			state.NextDetectionAt = &nextRunUTC
		}
		// Policy load errors are reported per job type rather than failing
		// the whole listing.
		policy, enabled, loadErr := r.loadSchedulerPolicy(jobType)
		if loadErr != nil {
			state.PolicyError = loadErr.Error()
		} else {
			state.Enabled = enabled
			if enabled {
				state.DetectionIntervalSeconds = secondsFromDuration(policy.DetectionInterval)
				state.DetectionTimeoutSeconds = secondsFromDuration(policy.DetectionTimeout)
				state.ExecutionTimeoutSeconds = secondsFromDuration(policy.ExecutionTimeout)
				state.MaxJobsPerDetection = policy.MaxResults
				state.GlobalExecutionConcurrency = policy.ExecutionConcurrency
				state.PerWorkerExecutionConcurrency = policy.PerWorkerConcurrency
				state.RetryLimit = policy.RetryLimit
				state.RetryBackoffSeconds = secondsFromDuration(policy.RetryBackoff)
			}
		}
		// Prefer the currently leased detector; it is reported available only
		// while the leased worker still advertises detection capability.
		leasedWorkerID := r.getDetectorLease(jobType)
		if leasedWorkerID != "" {
			state.DetectorWorkerID = leasedWorkerID
			if worker, ok := r.registry.Get(leasedWorkerID); ok {
				if capability := worker.Capabilities[jobType]; capability != nil && capability.CanDetect {
					state.DetectorAvailable = true
				}
			}
		}
		// Without a lease, fall back to whichever detector the registry
		// would pick right now.
		if state.DetectorWorkerID == "" {
			detector, detectorErr := r.registry.PickDetector(jobType)
			if detectorErr == nil && detector != nil {
				state.DetectorAvailable = true
				state.DetectorWorkerID = detector.WorkerID
			}
		}
		executors, executorErr := r.registry.ListExecutors(jobType)
		if executorErr == nil {
			state.ExecutorWorkerCount = len(executors)
		}
		states = append(states, state)
	}
	return states, nil
}
// deriveSchedulerAdminRuntime returns the admin runtime settings that drive
// scheduling for one job type: the persisted per-job-type config wins;
// otherwise the descriptor's defaults are used; nil means no settings exist.
// The returned message is always a fresh copy so callers can mutate it
// without touching the stored config.
func deriveSchedulerAdminRuntime(
	cfg *plugin_pb.PersistedJobTypeConfig,
	descriptor *plugin_pb.JobTypeDescriptor,
) *plugin_pb.AdminRuntimeConfig {
	if cfg != nil && cfg.AdminRuntime != nil {
		// Copy field-by-field instead of dereferencing the message
		// (adminConfig := *cfg.AdminRuntime): generated protobuf structs
		// embed no-copy internal state, so shallow value copies are flagged
		// by `go vet` (copylocks) and are unsupported by the protobuf API.
		src := cfg.AdminRuntime
		return &plugin_pb.AdminRuntimeConfig{
			Enabled:                       src.Enabled,
			DetectionIntervalSeconds:      src.DetectionIntervalSeconds,
			DetectionTimeoutSeconds:       src.DetectionTimeoutSeconds,
			MaxJobsPerDetection:           src.MaxJobsPerDetection,
			GlobalExecutionConcurrency:    src.GlobalExecutionConcurrency,
			PerWorkerExecutionConcurrency: src.PerWorkerExecutionConcurrency,
			RetryLimit:                    src.RetryLimit,
			RetryBackoffSeconds:           src.RetryBackoffSeconds,
		}
	}
	if descriptor == nil || descriptor.AdminRuntimeDefaults == nil {
		return nil
	}
	defaults := descriptor.AdminRuntimeDefaults
	return &plugin_pb.AdminRuntimeConfig{
		Enabled:                       defaults.Enabled,
		DetectionIntervalSeconds:      defaults.DetectionIntervalSeconds,
		DetectionTimeoutSeconds:       defaults.DetectionTimeoutSeconds,
		MaxJobsPerDetection:           defaults.MaxJobsPerDetection,
		GlobalExecutionConcurrency:    defaults.GlobalExecutionConcurrency,
		PerWorkerExecutionConcurrency: defaults.PerWorkerExecutionConcurrency,
		RetryLimit:                    defaults.RetryLimit,
		RetryBackoffSeconds:           defaults.RetryBackoffSeconds,
	}
}
// markDetectionDue reports whether a detection run for jobType should start
// now. When it returns true it also marks the detection in flight and records
// the next due time, so concurrent ticks cannot double-start the same run.
func (r *Plugin) markDetectionDue(jobType string, interval time.Duration) bool {
	current := time.Now().UTC()
	r.schedulerMu.Lock()
	defer r.schedulerMu.Unlock()
	if r.detectionInFlight[jobType] {
		return false
	}
	if due, ok := r.nextDetectionAt[jobType]; ok && current.Before(due) {
		return false
	}
	r.detectionInFlight[jobType] = true
	r.nextDetectionAt[jobType] = current.Add(interval)
	return true
}
// finishDetection clears the in-flight marker for jobType once a detection
// run (successful or not) has completed.
func (r *Plugin) finishDetection(jobType string) {
	r.schedulerMu.Lock()
	defer r.schedulerMu.Unlock()
	delete(r.detectionInFlight, jobType)
}
// pruneSchedulerState drops timing and in-flight bookkeeping for job types
// that are no longer in the detectable set.
func (r *Plugin) pruneSchedulerState(activeJobTypes map[string]struct{}) {
	r.schedulerMu.Lock()
	defer r.schedulerMu.Unlock()
	for jobType := range r.nextDetectionAt {
		if _, stillActive := activeJobTypes[jobType]; stillActive {
			continue
		}
		delete(r.nextDetectionAt, jobType)
		delete(r.detectionInFlight, jobType)
	}
}
// clearSchedulerJobType forgets all scheduler state for a disabled job type,
// including its detector lease.
func (r *Plugin) clearSchedulerJobType(jobType string) {
	r.schedulerMu.Lock()
	delete(r.detectionInFlight, jobType)
	delete(r.nextDetectionAt, jobType)
	r.schedulerMu.Unlock()
	r.clearDetectorLease(jobType, "")
}
// pruneDetectorLeases drops detector leases held for job types that are no
// longer in the detectable set.
func (r *Plugin) pruneDetectorLeases(activeJobTypes map[string]struct{}) {
	r.detectorLeaseMu.Lock()
	defer r.detectorLeaseMu.Unlock()
	for jobType := range r.detectorLeases {
		if _, stillActive := activeJobTypes[jobType]; !stillActive {
			delete(r.detectorLeases, jobType)
		}
	}
}
// runScheduledDetection performs one full detection cycle for jobType:
// emit a start activity, optionally skip on waiting-job backlog, load the
// cluster context, run detection, dedupe the proposals (first against active
// jobs, then within the run), and dispatch the survivors. Progress and
// failures are recorded as JobActivity entries rather than returned.
func (r *Plugin) runScheduledDetection(jobType string, policy schedulerPolicy) {
	defer r.finishDetection(jobType)
	start := time.Now().UTC()
	r.appendActivity(JobActivity{
		JobType:    jobType,
		Source:     "admin_scheduler",
		Message:    "scheduled detection started",
		Stage:      "detecting",
		OccurredAt: timeToPtr(start),
	})
	// Backpressure: when enough jobs are already waiting, do not detect more.
	if skip, waitingCount, waitingThreshold := r.shouldSkipDetectionForWaitingJobs(jobType, policy); skip {
		r.appendActivity(JobActivity{
			JobType:    jobType,
			Source:     "admin_scheduler",
			Message:    fmt.Sprintf("scheduled detection skipped: waiting backlog %d reached threshold %d", waitingCount, waitingThreshold),
			Stage:      "skipped_waiting_backlog",
			OccurredAt: timeToPtr(time.Now().UTC()),
		})
		return
	}
	clusterContext, err := r.loadSchedulerClusterContext()
	if err != nil {
		r.appendActivity(JobActivity{
			JobType:    jobType,
			Source:     "admin_scheduler",
			Message:    fmt.Sprintf("scheduled detection aborted: %v", err),
			Stage:      "failed",
			OccurredAt: timeToPtr(time.Now().UTC()),
		})
		return
	}
	// Detection itself is bounded by the policy's detection timeout.
	ctx, cancel := context.WithTimeout(context.Background(), policy.DetectionTimeout)
	proposals, err := r.RunDetection(ctx, jobType, clusterContext, policy.MaxResults)
	cancel()
	if err != nil {
		r.appendActivity(JobActivity{
			JobType:    jobType,
			Source:     "admin_scheduler",
			Message:    fmt.Sprintf("scheduled detection failed: %v", err),
			Stage:      "failed",
			OccurredAt: timeToPtr(time.Now().UTC()),
		})
		return
	}
	r.appendActivity(JobActivity{
		JobType:    jobType,
		Source:     "admin_scheduler",
		Message:    fmt.Sprintf("scheduled detection completed: %d proposal(s)", len(proposals)),
		Stage:      "detected",
		OccurredAt: timeToPtr(time.Now().UTC()),
	})
	// First dedupe pass: drop proposals already covered by active jobs.
	filteredByActive, skippedActive := r.filterProposalsWithActiveJobs(jobType, proposals)
	if skippedActive > 0 {
		r.appendActivity(JobActivity{
			JobType:    jobType,
			Source:     "admin_scheduler",
			Message:    fmt.Sprintf("scheduled detection skipped %d proposal(s) due to active assigned/running jobs", skippedActive),
			Stage:      "deduped_active_jobs",
			OccurredAt: timeToPtr(time.Now().UTC()),
		})
	}
	if len(filteredByActive) == 0 {
		return
	}
	// Second dedupe pass: drop duplicates within this detection run.
	filtered := r.filterScheduledProposals(filteredByActive)
	if len(filtered) != len(filteredByActive) {
		r.appendActivity(JobActivity{
			JobType:    jobType,
			Source:     "admin_scheduler",
			Message:    fmt.Sprintf("scheduled detection deduped %d proposal(s) within this run", len(filteredByActive)-len(filtered)),
			Stage:      "deduped",
			OccurredAt: timeToPtr(time.Now().UTC()),
		})
	}
	if len(filtered) == 0 {
		return
	}
	r.dispatchScheduledProposals(jobType, filtered, clusterContext, policy)
}
// loadSchedulerClusterContext fetches a fresh cluster snapshot from the
// configured provider, bounded by defaultClusterContextTimeout. Both a
// missing provider and a nil snapshot are reported as errors.
func (r *Plugin) loadSchedulerClusterContext() (*plugin_pb.ClusterContext, error) {
	provider := r.clusterContextProvider
	if provider == nil {
		return nil, fmt.Errorf("cluster context provider is not configured")
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultClusterContextTimeout)
	defer cancel()
	snapshot, err := provider(ctx)
	if err != nil {
		return nil, err
	}
	if snapshot == nil {
		return nil, fmt.Errorf("cluster context provider returned nil")
	}
	return snapshot, nil
}
// dispatchScheduledProposals turns deduped proposals into JobSpecs and runs
// them through a bounded worker pool (pool size = policy.ExecutionConcurrency,
// minimum 1). Each worker reserves executor capacity per job, re-queues on
// errExecutorAtCapacity, and tallies success/error counts into a final
// "scheduled execution finished" activity.
func (r *Plugin) dispatchScheduledProposals(
	jobType string,
	proposals []*plugin_pb.JobProposal,
	clusterContext *plugin_pb.ClusterContext,
	policy schedulerPolicy,
) {
	// Buffered to len(proposals): the sends below never block, so the
	// select's default branch is always taken unless shutdown fired first.
	jobQueue := make(chan *plugin_pb.JobSpec, len(proposals))
	for index, proposal := range proposals {
		job := buildScheduledJobSpec(jobType, proposal, index)
		r.trackExecutionQueued(job)
		select {
		case <-r.shutdownCh:
			close(jobQueue)
			return
		default:
			jobQueue <- job
		}
	}
	close(jobQueue)
	var wg sync.WaitGroup
	var statsMu sync.Mutex
	successCount := 0
	errorCount := 0
	workerCount := policy.ExecutionConcurrency
	if workerCount < 1 {
		workerCount = 1
	}
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobQueue {
				select {
				case <-r.shutdownCh:
					return
				default:
				}
				// Inner loop: retry reservation/execution of this one job
				// until it succeeds, fails hard, or shutdown is requested.
				for {
					select {
					case <-r.shutdownCh:
						return
					default:
					}
					executor, release, reserveErr := r.reserveScheduledExecutor(jobType, policy)
					if reserveErr != nil {
						// Suppress the failure activity when the error was
						// caused by shutdown.
						select {
						case <-r.shutdownCh:
							return
						default:
						}
						statsMu.Lock()
						errorCount++
						statsMu.Unlock()
						r.appendActivity(JobActivity{
							JobType:    jobType,
							Source:     "admin_scheduler",
							Message:    fmt.Sprintf("scheduled execution reservation failed: %v", reserveErr),
							Stage:      "failed",
							OccurredAt: timeToPtr(time.Now().UTC()),
						})
						break
					}
					err := r.executeScheduledJobWithExecutor(executor, job, clusterContext, policy)
					release()
					// The chosen executor turned out to be full: mark the job
					// queued again and retry after a short backoff.
					if errors.Is(err, errExecutorAtCapacity) {
						r.trackExecutionQueued(job)
						if !waitForShutdownOrTimer(r.shutdownCh, policy.ExecutorReserveBackoff) {
							return
						}
						continue
					}
					if err != nil {
						statsMu.Lock()
						errorCount++
						statsMu.Unlock()
						r.appendActivity(JobActivity{
							JobID:      job.JobId,
							JobType:    job.JobType,
							Source:     "admin_scheduler",
							Message:    fmt.Sprintf("scheduled execution failed: %v", err),
							Stage:      "failed",
							OccurredAt: timeToPtr(time.Now().UTC()),
						})
						break
					}
					statsMu.Lock()
					successCount++
					statsMu.Unlock()
					break
				}
			}
		}()
	}
	wg.Wait()
	r.appendActivity(JobActivity{
		JobType:    jobType,
		Source:     "admin_scheduler",
		Message:    fmt.Sprintf("scheduled execution finished: success=%d error=%d", successCount, errorCount),
		Stage:      "executed",
		OccurredAt: timeToPtr(time.Now().UTC()),
	})
}
// reserveScheduledExecutor picks an executor for jobType and reserves one
// execution slot on it, retrying with ExecutorReserveBackoff until capacity
// frees up, shutdown begins, or the reservation deadline passes. On success
// it returns the executor and a release func the caller MUST invoke.
func (r *Plugin) reserveScheduledExecutor(
	jobType string,
	policy schedulerPolicy,
) (*WorkerSession, func(), error) {
	// The reservation wait is bounded by the execution timeout; a
	// non-positive timeout falls back to a 10-minute cap.
	deadline := time.Now().Add(policy.ExecutionTimeout)
	if policy.ExecutionTimeout <= 0 {
		deadline = time.Now().Add(10 * time.Minute) // Default cap
	}
	for {
		select {
		case <-r.shutdownCh:
			return nil, nil, fmt.Errorf("plugin is shutting down")
		default:
		}
		if time.Now().After(deadline) {
			return nil, nil, fmt.Errorf("timed out waiting for executor capacity for %s", jobType)
		}
		executors, err := r.registry.ListExecutors(jobType)
		if err != nil {
			// Listing can fail transiently; back off and retry.
			if !waitForShutdownOrTimer(r.shutdownCh, policy.ExecutorReserveBackoff) {
				return nil, nil, fmt.Errorf("plugin is shutting down")
			}
			continue
		}
		// First executor with a free slot wins.
		for _, executor := range executors {
			release, ok := r.tryReserveExecutorCapacity(executor, jobType, policy)
			if !ok {
				continue
			}
			return executor, release, nil
		}
		// All executors full: back off before the next scan.
		if !waitForShutdownOrTimer(r.shutdownCh, policy.ExecutorReserveBackoff) {
			return nil, nil, fmt.Errorf("plugin is shutting down")
		}
	}
}
// tryReserveExecutorCapacity attempts to take one execution slot on the given
// executor. It accounts for both the slots the worker reports as used via its
// heartbeat and the slots already reserved by this scheduler. On success it
// returns a release func that must be called exactly once.
func (r *Plugin) tryReserveExecutorCapacity(
	executor *WorkerSession,
	jobType string,
	policy schedulerPolicy,
) (func(), bool) {
	if executor == nil || strings.TrimSpace(executor.WorkerID) == "" {
		return nil, false
	}
	limit := schedulerWorkerExecutionLimit(executor, jobType, policy)
	if limit <= 0 {
		return nil, false
	}
	used := 0
	if hb := executor.Heartbeat; hb != nil && hb.ExecutionSlotsUsed > 0 {
		used = int(hb.ExecutionSlotsUsed)
	}
	workerID := strings.TrimSpace(executor.WorkerID)
	r.schedulerExecMu.Lock()
	defer r.schedulerExecMu.Unlock()
	if used+r.schedulerExecReservations[workerID] >= limit {
		return nil, false
	}
	r.schedulerExecReservations[workerID]++
	return func() { r.releaseExecutorCapacity(workerID) }, true
}
// releaseExecutorCapacity returns one reserved execution slot for workerID,
// removing the bookkeeping entry entirely when the count drops to zero.
func (r *Plugin) releaseExecutorCapacity(workerID string) {
	id := strings.TrimSpace(workerID)
	if id == "" {
		return
	}
	r.schedulerExecMu.Lock()
	defer r.schedulerExecMu.Unlock()
	if r.schedulerExecReservations[id] <= 1 {
		delete(r.schedulerExecReservations, id)
	} else {
		r.schedulerExecReservations[id]--
	}
}
// schedulerWorkerExecutionLimit computes how many concurrent scheduled jobs
// may be reserved on an executor: the policy's per-worker limit, further
// capped by the worker's advertised capability concurrency and, when present,
// its heartbeat slot total. The result is never negative.
func schedulerWorkerExecutionLimit(executor *WorkerSession, jobType string, policy schedulerPolicy) int {
	limit := policy.PerWorkerConcurrency
	if limit <= 0 {
		limit = defaultScheduledPerWorkerConcurrency
	}
	if capability := executor.Capabilities[jobType]; capability != nil && capability.MaxExecutionConcurrency > 0 {
		if capLimit := int(capability.MaxExecutionConcurrency); capLimit < limit {
			limit = capLimit
		}
	}
	if hb := executor.Heartbeat; hb != nil && hb.ExecutionSlotsTotal > 0 {
		if hbLimit := int(hb.ExecutionSlotsTotal); hbLimit < limit {
			limit = hbLimit
		}
	}
	if limit < 0 {
		return 0
	}
	return limit
}
// executeScheduledJobWithExecutor runs job on the reserved executor with up
// to RetryLimit+1 attempts, each bounded by ExecutionTimeout and separated by
// RetryBackoff. Capacity errors are surfaced immediately as
// errExecutorAtCapacity so the caller re-queues rather than retrying here.
// Returns nil on the first successful attempt.
func (r *Plugin) executeScheduledJobWithExecutor(
	executor *WorkerSession,
	job *plugin_pb.JobSpec,
	clusterContext *plugin_pb.ClusterContext,
	policy schedulerPolicy,
) error {
	maxAttempts := policy.RetryLimit + 1
	if maxAttempts < 1 {
		maxAttempts = 1
	}
	var lastErr error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		select {
		case <-r.shutdownCh:
			return fmt.Errorf("plugin is shutting down")
		default:
		}
		execCtx, cancel := context.WithTimeout(context.Background(), policy.ExecutionTimeout)
		_, err := r.executeJobWithExecutor(execCtx, executor, job, clusterContext, int32(attempt))
		cancel()
		if err == nil {
			return nil
		}
		// Capacity exhaustion is not retried against the same executor.
		if isExecutorAtCapacityError(err) {
			return errExecutorAtCapacity
		}
		lastErr = err
		if attempt < maxAttempts {
			r.appendActivity(JobActivity{
				JobID:      job.JobId,
				JobType:    job.JobType,
				Source:     "admin_scheduler",
				Message:    fmt.Sprintf("retrying job attempt %d/%d after error: %v", attempt, maxAttempts, err),
				Stage:      "retry",
				OccurredAt: timeToPtr(time.Now().UTC()),
			})
			if !waitForShutdownOrTimer(r.shutdownCh, policy.RetryBackoff) {
				return fmt.Errorf("plugin is shutting down")
			}
		}
	}
	// Defensive: lastErr should always be set by the time we get here.
	if lastErr == nil {
		lastErr = fmt.Errorf("execution failed without an explicit error")
	}
	return lastErr
}
// shouldSkipDetectionForWaitingJobs reports whether detection for jobType
// should be skipped because too many tracked jobs are still waiting to run.
// It also returns the current waiting count and the computed threshold.
func (r *Plugin) shouldSkipDetectionForWaitingJobs(jobType string, policy schedulerPolicy) (bool, int, int) {
	threshold := waitingBacklogThreshold(policy)
	waiting := r.countWaitingTrackedJobs(jobType)
	if threshold <= 0 {
		return false, waiting, threshold
	}
	return waiting >= threshold, waiting, threshold
}
// countWaitingTrackedJobs counts tracked jobs of the given type that are
// still in a waiting (pending) state. A blank job type matches nothing.
func (r *Plugin) countWaitingTrackedJobs(jobType string) int {
	wanted := strings.TrimSpace(jobType)
	if wanted == "" {
		return 0
	}
	r.jobsMu.RLock()
	defer r.jobsMu.RUnlock()
	total := 0
	for _, tracked := range r.jobs {
		if tracked == nil || strings.TrimSpace(tracked.JobType) != wanted {
			continue
		}
		if isWaitingTrackedJobState(tracked.State) {
			total++
		}
	}
	return total
}
// waitingBacklogThreshold derives the waiting-job count at which detection is
// paused: concurrency * defaultWaitingBacklogMultiplier, floored at
// defaultWaitingBacklogFloor and capped by the policy's MaxResults when set.
func waitingBacklogThreshold(policy schedulerPolicy) int {
	workers := policy.ExecutionConcurrency
	if workers <= 0 {
		workers = defaultScheduledExecutionConcurrency
	}
	threshold := workers * defaultWaitingBacklogMultiplier
	if threshold < defaultWaitingBacklogFloor {
		threshold = defaultWaitingBacklogFloor
	}
	if limit := int(policy.MaxResults); policy.MaxResults > 0 && threshold > limit {
		threshold = limit
	}
	return threshold
}
// isExecutorAtCapacityError reports whether err represents an executor that
// is temporarily out of execution slots: either the local sentinel error or
// an equivalent message propagated as plain text (matched case-insensitively).
func isExecutorAtCapacityError(err error) bool {
	switch {
	case err == nil:
		return false
	case errors.Is(err, errExecutorAtCapacity):
		return true
	default:
		return strings.Contains(strings.ToLower(err.Error()), "executor is at capacity")
	}
}
// buildScheduledJobSpec materializes a JobSpec from a detection proposal.
// The job ID is always freshly generated (type + timestamp + run index) and
// never reuses the proposal ID, so repeated detections cannot collide.
// A nil proposal yields a bare spec with empty parameter/label maps.
func buildScheduledJobSpec(jobType string, proposal *plugin_pb.JobProposal, index int) *plugin_pb.JobSpec {
	createdAt := timestamppb.Now()
	spec := &plugin_pb.JobSpec{
		JobId:       fmt.Sprintf("%s-scheduled-%d-%d", jobType, createdAt.AsTime().UnixNano(), index),
		JobType:     jobType,
		Priority:    plugin_pb.JobPriority_JOB_PRIORITY_NORMAL,
		Parameters:  map[string]*plugin_pb.ConfigValue{},
		Labels:      map[string]string{},
		CreatedAt:   createdAt,
		ScheduledAt: createdAt,
	}
	if proposal == nil {
		return spec
	}
	if proposal.JobType != "" {
		spec.JobType = proposal.JobType
	}
	spec.Summary = proposal.Summary
	spec.Detail = proposal.Detail
	if proposal.Priority != plugin_pb.JobPriority_JOB_PRIORITY_UNSPECIFIED {
		spec.Priority = proposal.Priority
	}
	spec.DedupeKey = proposal.DedupeKey
	spec.Parameters = CloneConfigValueMap(proposal.Parameters)
	if proposal.Labels != nil {
		labels := make(map[string]string, len(proposal.Labels))
		for name, value := range proposal.Labels {
			labels[name] = value
		}
		spec.Labels = labels
	}
	if proposal.NotBefore != nil {
		spec.ScheduledAt = proposal.NotBefore
	}
	return spec
}
// durationFromSeconds converts a configured second count into a Duration,
// substituting defaultValue when the count is zero or negative.
func durationFromSeconds(seconds int32, defaultValue time.Duration) time.Duration {
	if seconds > 0 {
		return time.Duration(seconds) * time.Second
	}
	return defaultValue
}
// secondsFromDuration converts a Duration to whole seconds (truncating),
// clamping zero and negative durations to 0.
func secondsFromDuration(duration time.Duration) int32 {
	if duration > 0 {
		return int32(duration / time.Second)
	}
	return 0
}
// waitForShutdownOrTimer sleeps for duration unless the shutdown channel
// fires first. It returns true when the full duration elapsed (or duration
// was non-positive) and false when shutdown interrupted the wait.
func waitForShutdownOrTimer(shutdown <-chan struct{}, duration time.Duration) bool {
	if duration <= 0 {
		return true
	}
	t := time.NewTimer(duration)
	defer t.Stop()
	select {
	case <-t.C:
		return true
	case <-shutdown:
		return false
	}
}
// filterProposalsWithActiveJobs drops proposals whose dedupe key (or, absent
// one, proposal ID) collides with a currently active tracked job of the same
// job type, so the scheduler does not dispatch duplicates of work that is
// already pending/assigned/running. Returns the survivors and the skip count.
func (r *Plugin) filterProposalsWithActiveJobs(jobType string, proposals []*plugin_pb.JobProposal) ([]*plugin_pb.JobProposal, int) {
	if len(proposals) == 0 {
		return proposals, 0
	}
	// Hoisted out of the loop: previously TrimSpace(jobType) was recomputed
	// for every tracked job.
	normalizedJobType := strings.TrimSpace(jobType)
	activeKeys := make(map[string]struct{})
	r.jobsMu.RLock()
	for _, job := range r.jobs {
		if job == nil {
			continue
		}
		if strings.TrimSpace(job.JobType) != normalizedJobType {
			continue
		}
		if !isActiveTrackedJobState(job.State) {
			continue
		}
		// Prefer the dedupe key; fall back to the job ID so proposals that
		// reuse an active job's ID are also skipped.
		key := strings.TrimSpace(job.DedupeKey)
		if key == "" {
			key = strings.TrimSpace(job.JobID)
		}
		if key == "" {
			continue
		}
		activeKeys[key] = struct{}{}
	}
	r.jobsMu.RUnlock()
	if len(activeKeys) == 0 {
		return proposals, 0
	}
	filtered := make([]*plugin_pb.JobProposal, 0, len(proposals))
	skipped := 0
	for _, proposal := range proposals {
		if proposal == nil {
			continue
		}
		// Keyless proposals always pass through.
		key := proposalExecutionKey(proposal)
		if key != "" {
			if _, exists := activeKeys[key]; exists {
				skipped++
				continue
			}
		}
		filtered = append(filtered, proposal)
	}
	return filtered, skipped
}
// proposalExecutionKey returns the identity used for dedupe checks: the
// proposal's trimmed dedupe key when present, otherwise its trimmed proposal
// ID; empty for a nil proposal.
func proposalExecutionKey(proposal *plugin_pb.JobProposal) string {
	if proposal == nil {
		return ""
	}
	if key := strings.TrimSpace(proposal.DedupeKey); key != "" {
		return key
	}
	return strings.TrimSpace(proposal.ProposalId)
}
// isActiveTrackedJobState reports whether a tracked job state string (either
// the short form or the JobState enum name, case- and space-insensitive)
// denotes work that is still pending, assigned, or running.
func isActiveTrackedJobState(state string) bool {
	switch strings.ToLower(strings.TrimSpace(state)) {
	case "pending", "assigned", "running", "in_progress",
		"job_state_pending", "job_state_assigned", "job_state_running":
		return true
	}
	return false
}
// isWaitingTrackedJobState reports whether a tracked job state string (short
// form or JobState enum name, case- and space-insensitive) means the job is
// still waiting to be executed.
func isWaitingTrackedJobState(state string) bool {
	switch strings.ToLower(strings.TrimSpace(state)) {
	case "pending", "job_state_pending":
		return true
	}
	return false
}
// filterScheduledProposals removes duplicates within a single detection run,
// keyed by dedupe key with the proposal ID as fallback. Proposals without
// either key always pass through, and nil entries are dropped. The seen-set
// is per call, so dedupe never carries over between runs.
func (r *Plugin) filterScheduledProposals(proposals []*plugin_pb.JobProposal) []*plugin_pb.JobProposal {
	seen := make(map[string]struct{}, len(proposals))
	kept := make([]*plugin_pb.JobProposal, 0, len(proposals))
	for _, candidate := range proposals {
		if candidate == nil {
			continue
		}
		key := candidate.DedupeKey
		if key == "" {
			key = candidate.ProposalId
		}
		if key == "" {
			kept = append(kept, candidate)
			continue
		}
		if _, dup := seen[key]; dup {
			continue
		}
		seen[key] = struct{}{}
		kept = append(kept, candidate)
	}
	return kept
}

View File

@@ -0,0 +1,583 @@
package plugin
import (
"fmt"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestLoadSchedulerPolicyUsesAdminConfig verifies that a persisted
// AdminRuntimeConfig drives the resolved scheduler policy: the configured
// values flow through unchanged because they all fall inside the clamping
// bounds applied by loadSchedulerPolicy.
func TestLoadSchedulerPolicyUsesAdminConfig(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	err = pluginSvc.SaveJobTypeConfig(&plugin_pb.PersistedJobTypeConfig{
		JobType: "vacuum",
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{
			Enabled:                       true,
			DetectionIntervalSeconds:      30,
			DetectionTimeoutSeconds:       20,
			MaxJobsPerDetection:           123,
			GlobalExecutionConcurrency:    5,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    4,
			RetryBackoffSeconds:           7,
		},
	})
	if err != nil {
		t.Fatalf("SaveJobTypeConfig: %v", err)
	}
	policy, enabled, err := pluginSvc.loadSchedulerPolicy("vacuum")
	if err != nil {
		t.Fatalf("loadSchedulerPolicy: %v", err)
	}
	if !enabled {
		t.Fatalf("expected enabled policy")
	}
	if policy.MaxResults != 123 {
		t.Fatalf("unexpected max results: got=%d", policy.MaxResults)
	}
	if policy.ExecutionConcurrency != 5 {
		t.Fatalf("unexpected global concurrency: got=%d", policy.ExecutionConcurrency)
	}
	if policy.PerWorkerConcurrency != 2 {
		t.Fatalf("unexpected per-worker concurrency: got=%d", policy.PerWorkerConcurrency)
	}
	if policy.RetryLimit != 4 {
		t.Fatalf("unexpected retry limit: got=%d", policy.RetryLimit)
	}
}
// TestLoadSchedulerPolicyUsesDescriptorDefaultsWhenConfigMissing verifies the
// fallback path: with no persisted admin config, the descriptor's
// AdminRuntimeDefaults bootstrap the resolved scheduler policy.
func TestLoadSchedulerPolicyUsesDescriptorDefaultsWhenConfigMissing(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	// Only a descriptor is saved — no SaveJobTypeConfig call.
	err = pluginSvc.store.SaveDescriptor("ec", &plugin_pb.JobTypeDescriptor{
		JobType: "ec",
		AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
			Enabled:                       true,
			DetectionIntervalSeconds:      60,
			DetectionTimeoutSeconds:       25,
			MaxJobsPerDetection:           30,
			GlobalExecutionConcurrency:    4,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    3,
			RetryBackoffSeconds:           6,
		},
	})
	if err != nil {
		t.Fatalf("SaveDescriptor: %v", err)
	}
	policy, enabled, err := pluginSvc.loadSchedulerPolicy("ec")
	if err != nil {
		t.Fatalf("loadSchedulerPolicy: %v", err)
	}
	if !enabled {
		t.Fatalf("expected enabled policy from descriptor defaults")
	}
	if policy.MaxResults != 30 {
		t.Fatalf("unexpected max results: got=%d", policy.MaxResults)
	}
	if policy.ExecutionConcurrency != 4 {
		t.Fatalf("unexpected global concurrency: got=%d", policy.ExecutionConcurrency)
	}
	if policy.PerWorkerConcurrency != 2 {
		t.Fatalf("unexpected per-worker concurrency: got=%d", policy.PerWorkerConcurrency)
	}
}
// TestReserveScheduledExecutorRespectsPerWorkerLimit registers two executors
// and reserves twice with PerWorkerConcurrency=1: the second reservation must
// land on a different worker because the first worker's single slot is held.
func TestReserveScheduledExecutorRespectsPerWorkerLimit(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 4},
		},
	})
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 2},
		},
	})
	// PerWorkerConcurrency=1 overrides the workers' larger capability limits.
	policy := schedulerPolicy{
		PerWorkerConcurrency:   1,
		ExecutorReserveBackoff: time.Millisecond,
	}
	executor1, release1, err := pluginSvc.reserveScheduledExecutor("balance", policy)
	if err != nil {
		t.Fatalf("reserve executor 1: %v", err)
	}
	defer release1()
	executor2, release2, err := pluginSvc.reserveScheduledExecutor("balance", policy)
	if err != nil {
		t.Fatalf("reserve executor 2: %v", err)
	}
	defer release2()
	if executor1.WorkerID == executor2.WorkerID {
		// Message grammar fixed: was "due per-worker limit".
		t.Fatalf("expected different executors due to the per-worker limit, got same worker %s", executor1.WorkerID)
	}
}
// TestFilterScheduledProposalsDedupe checks in-run deduplication: proposals
// sharing a DedupeKey collapse to one, proposals without a DedupeKey fall
// back to ProposalId, and the dedupe state is rebuilt per call — a second run
// over the same input yields the same count, proving nothing leaks between
// runs.
func TestFilterScheduledProposalsDedupe(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	proposals := []*plugin_pb.JobProposal{
		{ProposalId: "p1", DedupeKey: "d1"},
		{ProposalId: "p2", DedupeKey: "d1"}, // same dedupe key
		{ProposalId: "p3", DedupeKey: "d3"},
		{ProposalId: "p3"}, // fallback dedupe by proposal id
		{ProposalId: "p4"},
		{ProposalId: "p4"}, // same proposal id, no dedupe key
	}
	// "p3" without a DedupeKey is keyed by its id, which does not collide
	// with the keyed "p3"/"d3" entry — so 4 survivors, not 3.
	filtered := pluginSvc.filterScheduledProposals(proposals)
	if len(filtered) != 4 {
		t.Fatalf("unexpected filtered size: got=%d want=4", len(filtered))
	}
	filtered2 := pluginSvc.filterScheduledProposals(proposals)
	if len(filtered2) != 4 {
		t.Fatalf("expected second run dedupe to be per-run only, got=%d", len(filtered2))
	}
}
// TestBuildScheduledJobSpecDoesNotReuseProposalID checks that scheduled job
// ids are minted fresh: never the originating proposal id, and unique across
// attempts built from the same proposal.
func TestBuildScheduledJobSpecDoesNotReuseProposalID(t *testing.T) {
	t.Parallel()
	proposal := &plugin_pb.JobProposal{
		ProposalId: "vacuum-2",
		DedupeKey:  "vacuum:2",
		JobType:    "vacuum",
	}
	jobA := buildScheduledJobSpec("vacuum", proposal, 0)
	jobB := buildScheduledJobSpec("vacuum", proposal, 1)
	for _, jobID := range []string{jobA.JobId, jobB.JobId} {
		if jobID == proposal.ProposalId {
			t.Fatalf("scheduled job id must not reuse proposal id: %s", jobID)
		}
	}
	if jobA.JobId == jobB.JobId {
		t.Fatalf("scheduled job ids must be unique across jobs: %s", jobA.JobId)
	}
}
// TestFilterProposalsWithActiveJobs verifies that proposals matching an
// already-running or queued job (by dedupe key, or by job id when no dedupe
// key exists) are skipped, while unrelated proposals pass through.
func TestFilterProposalsWithActiveJobs(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	// Running job with a dedupe key: should block proposals carrying "vacuum:k1".
	pluginSvc.trackExecutionStart("req-1", "worker-a", &plugin_pb.JobSpec{
		JobId:     "job-1",
		JobType:   "vacuum",
		DedupeKey: "vacuum:k1",
	}, 1)
	// Running job without a dedupe key: should block a proposal whose id is "job-2".
	pluginSvc.trackExecutionStart("req-2", "worker-b", &plugin_pb.JobSpec{
		JobId:   "job-2",
		JobType: "vacuum",
	}, 1)
	// Queued (not yet running) job: should block proposals carrying "vacuum:k4".
	pluginSvc.trackExecutionQueued(&plugin_pb.JobSpec{
		JobId:     "job-3",
		JobType:   "vacuum",
		DedupeKey: "vacuum:k4",
	})
	filtered, skipped := pluginSvc.filterProposalsWithActiveJobs("vacuum", []*plugin_pb.JobProposal{
		{ProposalId: "proposal-1", JobType: "vacuum", DedupeKey: "vacuum:k1"}, // skipped: key of running job-1
		{ProposalId: "job-2", JobType: "vacuum"},                              // skipped: id collides with running job-2
		{ProposalId: "proposal-2b", JobType: "vacuum", DedupeKey: "vacuum:k4"}, // skipped: key of queued job-3
		{ProposalId: "proposal-3", JobType: "vacuum", DedupeKey: "vacuum:k3"},  // kept
		{ProposalId: "proposal-4", JobType: "balance", DedupeKey: "balance:k1"}, // kept: no matching active job
	})
	if skipped != 3 {
		t.Fatalf("unexpected skipped count: got=%d want=3", skipped)
	}
	if len(filtered) != 2 {
		t.Fatalf("unexpected filtered size: got=%d want=2", len(filtered))
	}
	if filtered[0].ProposalId != "proposal-3" || filtered[1].ProposalId != "proposal-4" {
		t.Fatalf("unexpected filtered proposals: got=%s,%s", filtered[0].ProposalId, filtered[1].ProposalId)
	}
}
// TestReserveScheduledExecutorTimesOutWhenNoExecutor verifies that executor
// reservation returns promptly with an error after the service is shut down,
// instead of blocking through its retry/backoff loop.
func TestReserveScheduledExecutorTimesOutWhenNoExecutor(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	// NOTE(review): Shutdown is invoked both explicitly below and by this
	// defer — assumes Shutdown is idempotent; confirm against its implementation.
	defer pluginSvc.Shutdown()
	policy := schedulerPolicy{
		ExecutionTimeout:       30 * time.Millisecond,
		ExecutorReserveBackoff: 5 * time.Millisecond,
		PerWorkerConcurrency:   1,
	}
	start := time.Now()
	// Shut down first so the reservation below observes the closed service.
	pluginSvc.Shutdown()
	_, _, err = pluginSvc.reserveScheduledExecutor("missing-job-type", policy)
	if err == nil {
		t.Fatalf("expected reservation shutdown error")
	}
	// Must fail fast — well before the timeout-plus-backoff budget elapses.
	if time.Since(start) > 50*time.Millisecond {
		t.Fatalf("reservation returned too late after shutdown: duration=%v", time.Since(start))
	}
}
// TestReserveScheduledExecutorWaitsForWorkerCapacity verifies that when the
// only executor's capacity (MaxExecutionConcurrency=1) is fully reserved, a
// second reservation blocks until the first is released, then succeeds.
func TestReserveScheduledExecutorWaitsForWorkerCapacity(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 1},
		},
	})
	// Per-worker limit (8) far exceeds the worker's own capacity (1), so the
	// worker capacity is the binding constraint in this test.
	policy := schedulerPolicy{
		ExecutionTimeout:       time.Second,
		PerWorkerConcurrency:   8,
		ExecutorReserveBackoff: 5 * time.Millisecond,
	}
	_, release1, err := pluginSvc.reserveScheduledExecutor("balance", policy)
	if err != nil {
		t.Fatalf("reserve executor 1: %v", err)
	}
	// NOTE(review): release1 runs both explicitly below and via this defer —
	// assumes release callbacks are safe to call twice; confirm.
	defer release1()
	type reserveResult struct {
		err error
	}
	secondReserveCh := make(chan reserveResult, 1)
	go func() {
		_, release2, reserveErr := pluginSvc.reserveScheduledExecutor("balance", policy)
		if release2 != nil {
			release2()
		}
		secondReserveCh <- reserveResult{err: reserveErr}
	}()
	// While slot 1 is held, the second reservation must still be blocked.
	select {
	case result := <-secondReserveCh:
		t.Fatalf("expected second reservation to wait for capacity, got=%v", result.err)
	case <-time.After(25 * time.Millisecond):
		// Expected: still waiting.
	}
	release1()
	// Releasing the held slot should unblock the waiting reservation.
	select {
	case result := <-secondReserveCh:
		if result.err != nil {
			t.Fatalf("second reservation error: %v", result.err)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("second reservation did not acquire after capacity release")
	}
}
// TestShouldSkipDetectionForWaitingJobs verifies that once the queued-job
// backlog for a job type reaches the policy-derived threshold, detection is
// skipped and the reported count/threshold values match.
func TestShouldSkipDetectionForWaitingJobs(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	policy := schedulerPolicy{
		ExecutionConcurrency: 2,
		MaxResults:           100,
	}
	threshold := waitingBacklogThreshold(policy)
	if threshold <= 0 {
		t.Fatalf("expected positive waiting threshold")
	}
	// Queue exactly threshold jobs, each with a unique id and dedupe key.
	for i := 0; i < threshold; i++ {
		pluginSvc.trackExecutionQueued(&plugin_pb.JobSpec{
			JobId:     fmt.Sprintf("job-waiting-%d", i),
			JobType:   "vacuum",
			DedupeKey: fmt.Sprintf("vacuum:%d", i),
		})
	}
	skip, waitingCount, waitingThreshold := pluginSvc.shouldSkipDetectionForWaitingJobs("vacuum", policy)
	if !skip {
		t.Fatalf("expected detection to skip when waiting backlog reaches threshold")
	}
	if waitingCount != threshold {
		t.Fatalf("unexpected waiting count: got=%d want=%d", waitingCount, threshold)
	}
	if waitingThreshold != threshold {
		t.Fatalf("unexpected waiting threshold: got=%d want=%d", waitingThreshold, threshold)
	}
}
// TestWaitingBacklogThresholdHonorsMaxResultsCap checks that the waiting
// backlog threshold never exceeds the policy's MaxResults cap, even when
// execution concurrency would otherwise produce a larger value.
func TestWaitingBacklogThresholdHonorsMaxResultsCap(t *testing.T) {
	t.Parallel()
	policy := schedulerPolicy{
		ExecutionConcurrency: 8,
		MaxResults:           6,
	}
	if threshold := waitingBacklogThreshold(policy); threshold != 6 {
		t.Fatalf("expected threshold to be capped by max results, got=%d", threshold)
	}
}
// TestListSchedulerStatesIncludesPolicyAndState verifies that the scheduler
// state snapshot surfaces the persisted policy values, live detection
// bookkeeping, and worker availability for a configured job type.
func TestListSchedulerStatesIncludesPolicyAndState(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	const jobType = "vacuum"
	err = pluginSvc.SaveJobTypeConfig(&plugin_pb.PersistedJobTypeConfig{
		JobType: jobType,
		AdminRuntime: &plugin_pb.AdminRuntimeConfig{
			Enabled:                       true,
			DetectionIntervalSeconds:      45,
			DetectionTimeoutSeconds:       30,
			MaxJobsPerDetection:           80,
			GlobalExecutionConcurrency:    3,
			PerWorkerExecutionConcurrency: 2,
			RetryLimit:                    1,
			RetryBackoffSeconds:           9,
		},
	})
	if err != nil {
		t.Fatalf("SaveJobTypeConfig: %v", err)
	}
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, CanExecute: true},
		},
	})
	// Seed scheduler bookkeeping directly so the snapshot has state to report.
	nextDetectionAt := time.Now().UTC().Add(2 * time.Minute).Round(time.Second)
	pluginSvc.schedulerMu.Lock()
	pluginSvc.nextDetectionAt[jobType] = nextDetectionAt
	pluginSvc.detectionInFlight[jobType] = true
	pluginSvc.schedulerMu.Unlock()
	states, err := pluginSvc.ListSchedulerStates()
	if err != nil {
		t.Fatalf("ListSchedulerStates: %v", err)
	}
	state := findSchedulerState(states, jobType)
	if state == nil {
		t.Fatalf("missing scheduler state for %s", jobType)
	}
	if !state.Enabled {
		t.Fatalf("expected enabled scheduler state")
	}
	if state.PolicyError != "" {
		t.Fatalf("unexpected policy error: %s", state.PolicyError)
	}
	if !state.DetectionInFlight {
		t.Fatalf("expected detection in flight")
	}
	if state.NextDetectionAt == nil {
		t.Fatalf("expected next detection time")
	}
	// Compare at second precision since nextDetectionAt was rounded above.
	if state.NextDetectionAt.Unix() != nextDetectionAt.Unix() {
		t.Fatalf("unexpected next detection time: got=%v want=%v", state.NextDetectionAt, nextDetectionAt)
	}
	if state.DetectionIntervalSeconds != 45 {
		t.Fatalf("unexpected detection interval: got=%d", state.DetectionIntervalSeconds)
	}
	if state.DetectionTimeoutSeconds != 30 {
		t.Fatalf("unexpected detection timeout: got=%d", state.DetectionTimeoutSeconds)
	}
	// 90 is not set in the config above — presumably the policy's default
	// execution timeout; confirm against the policy-building defaults.
	if state.ExecutionTimeoutSeconds != 90 {
		t.Fatalf("unexpected execution timeout: got=%d", state.ExecutionTimeoutSeconds)
	}
	if state.MaxJobsPerDetection != 80 {
		t.Fatalf("unexpected max jobs per detection: got=%d", state.MaxJobsPerDetection)
	}
	if state.GlobalExecutionConcurrency != 3 {
		t.Fatalf("unexpected global execution concurrency: got=%d", state.GlobalExecutionConcurrency)
	}
	if state.PerWorkerExecutionConcurrency != 2 {
		t.Fatalf("unexpected per worker execution concurrency: got=%d", state.PerWorkerExecutionConcurrency)
	}
	if state.RetryLimit != 1 {
		t.Fatalf("unexpected retry limit: got=%d", state.RetryLimit)
	}
	if state.RetryBackoffSeconds != 9 {
		t.Fatalf("unexpected retry backoff: got=%d", state.RetryBackoffSeconds)
	}
	if !state.DetectorAvailable || state.DetectorWorkerID != "worker-a" {
		t.Fatalf("unexpected detector assignment: available=%v worker=%s", state.DetectorAvailable, state.DetectorWorkerID)
	}
	if state.ExecutorWorkerCount != 1 {
		t.Fatalf("unexpected executor worker count: got=%d", state.ExecutorWorkerCount)
	}
}
// TestListSchedulerStatesShowsDisabledWhenNoPolicy verifies that a job type
// with connected workers but no persisted config is reported as disabled —
// without a policy error — while worker availability is still surfaced.
func TestListSchedulerStatesShowsDisabledWhenNoPolicy(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	const jobType = "balance"
	// Worker connects, but no SaveJobTypeConfig call: state must be disabled.
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: jobType, CanDetect: true, CanExecute: true},
		},
	})
	states, err := pluginSvc.ListSchedulerStates()
	if err != nil {
		t.Fatalf("ListSchedulerStates: %v", err)
	}
	state := findSchedulerState(states, jobType)
	if state == nil {
		t.Fatalf("missing scheduler state for %s", jobType)
	}
	if state.Enabled {
		t.Fatalf("expected disabled scheduler state")
	}
	// Missing config is not an error condition, just a disabled scheduler.
	if state.PolicyError != "" {
		t.Fatalf("unexpected policy error: %s", state.PolicyError)
	}
	if !state.DetectorAvailable || state.DetectorWorkerID != "worker-b" {
		t.Fatalf("unexpected detector details: available=%v worker=%s", state.DetectorAvailable, state.DetectorWorkerID)
	}
	if state.ExecutorWorkerCount != 1 {
		t.Fatalf("unexpected executor worker count: got=%d", state.ExecutorWorkerCount)
	}
}
// findSchedulerState returns a pointer into states for the matching job type,
// or nil when the job type is absent.
func findSchedulerState(states []SchedulerJobTypeState, jobType string) *SchedulerJobTypeState {
	for i := 0; i < len(states); i++ {
		if states[i].JobType != jobType {
			continue
		}
		return &states[i]
	}
	return nil
}
// TestPickDetectorPrefersLeasedWorker checks that an existing detector lease
// wins over other detect-capable workers for the same job type.
func TestPickDetectorPrefersLeasedWorker(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	for _, workerID := range []string{"worker-a", "worker-b"} {
		pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
			WorkerId: workerID,
			Capabilities: []*plugin_pb.JobTypeCapability{
				{JobType: "vacuum", CanDetect: true},
			},
		})
	}
	pluginSvc.setDetectorLease("vacuum", "worker-b")
	detector, err := pluginSvc.pickDetector("vacuum")
	if err != nil {
		t.Fatalf("pickDetector: %v", err)
	}
	if detector.WorkerID != "worker-b" {
		t.Fatalf("expected leased detector worker-b, got=%s", detector.WorkerID)
	}
}
// TestPickDetectorReassignsWhenLeaseIsStale verifies that a detector lease
// pointing at a worker that is not registered gets replaced: an available
// worker is picked and the lease is rewritten to it.
func TestPickDetectorReassignsWhenLeaseIsStale(t *testing.T) {
	t.Parallel()
	pluginSvc, err := New(Options{})
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	defer pluginSvc.Shutdown()
	pluginSvc.registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true},
		},
	})
	// Lease a worker id that was never registered.
	pluginSvc.setDetectorLease("vacuum", "worker-stale")
	detector, err := pluginSvc.pickDetector("vacuum")
	if err != nil {
		t.Fatalf("pickDetector: %v", err)
	}
	if detector.WorkerID != "worker-a" {
		t.Fatalf("expected reassigned detector worker-a, got=%s", detector.WorkerID)
	}
	// The lease itself must also be repointed, not just the pick.
	lease := pluginSvc.getDetectorLease("vacuum")
	if lease != "worker-a" {
		t.Fatalf("expected detector lease to be updated to worker-a, got=%s", lease)
	}
}

View File

@@ -0,0 +1,66 @@
package plugin
import (
"context"
"sort"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// descriptorPrefetchTimeout bounds each RequestConfigSchema call made while
// prefetching descriptors for a newly connected worker.
const descriptorPrefetchTimeout = 20 * time.Second
// prefetchDescriptorsFromHello eagerly fetches config-schema descriptors for
// every job type a newly connected worker advertises with detect or execute
// capability, skipping types whose descriptor is already persisted. Failures
// are logged and skipped so one bad job type never blocks the others.
func (r *Plugin) prefetchDescriptorsFromHello(hello *plugin_pb.WorkerHello) {
	if hello == nil || len(hello.Capabilities) == 0 {
		return
	}
	// Deduplicate job types across the worker's capability list.
	jobTypeSet := make(map[string]struct{})
	for _, capability := range hello.Capabilities {
		if capability == nil || capability.JobType == "" {
			continue
		}
		// Only prefetch for job types the worker can actually serve.
		if !capability.CanDetect && !capability.CanExecute {
			continue
		}
		jobTypeSet[capability.JobType] = struct{}{}
	}
	if len(jobTypeSet) == 0 {
		return
	}
	// Sort for deterministic fetch order.
	jobTypes := make([]string, 0, len(jobTypeSet))
	for jobType := range jobTypeSet {
		jobTypes = append(jobTypes, jobType)
	}
	sort.Strings(jobTypes)
	for _, jobType := range jobTypes {
		// Bail out between RPCs if the runtime is shutting down.
		select {
		case <-r.shutdownCh:
			return
		default:
		}
		descriptor, err := r.store.LoadDescriptor(jobType)
		if err != nil {
			glog.Warningf("Plugin descriptor prefetch check failed for %s: %v", jobType, err)
			continue
		}
		// Already persisted: nothing to fetch for this job type.
		if descriptor != nil {
			continue
		}
		// Per-RPC timeout; cancel immediately instead of deferring in the loop.
		ctx, cancel := context.WithTimeout(r.ctx, descriptorPrefetchTimeout)
		_, err = r.RequestConfigSchema(ctx, jobType, false)
		cancel()
		if err != nil {
			glog.V(1).Infof("Plugin descriptor prefetch skipped for %s: %v", jobType, err)
			continue
		}
		glog.V(1).Infof("Plugin descriptor prefetched for job_type=%s", jobType)
	}
}

View File

@@ -0,0 +1,465 @@
package plugin
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// defaultWorkerStaleTimeout is how long a worker may go unseen before the
// registry hides it from listing and selection.
const defaultWorkerStaleTimeout = 2 * time.Minute
// WorkerSession contains tracked worker metadata and plugin status.
type WorkerSession struct {
	WorkerID        string
	WorkerInstance  string
	Address         string
	WorkerVersion   string
	ProtocolVersion string
	// ConnectedAt is when this worker id was first registered.
	ConnectedAt time.Time
	// LastSeenAt is bumped on every hello and heartbeat; staleness is judged
	// against it.
	LastSeenAt time.Time
	// Capabilities maps job type to the capability advertised in the hello.
	Capabilities map[string]*plugin_pb.JobTypeCapability
	// Heartbeat is the most recent heartbeat, nil until one arrives.
	Heartbeat *plugin_pb.WorkerHeartbeat
}
// Registry tracks connected plugin workers and capability-based selection.
type Registry struct {
	mu       sync.RWMutex
	sessions map[string]*WorkerSession // keyed by worker ID
	// staleAfter hides sessions not seen within this window; <=0 disables it.
	staleAfter time.Duration
	// detectorCursor/executorCursor hold per-job-type round-robin offsets used
	// to rotate ties between equally loaded candidates.
	detectorCursor map[string]int
	executorCursor map[string]int
}
// NewRegistry returns an empty registry using the default stale timeout.
func NewRegistry() *Registry {
	registry := &Registry{
		sessions:       make(map[string]*WorkerSession),
		staleAfter:     defaultWorkerStaleTimeout,
		detectorCursor: make(map[string]int),
		executorCursor: make(map[string]int),
	}
	return registry
}
// UpsertFromHello registers or refreshes a worker session from its hello
// message and returns a snapshot of the stored session. A nil hello is
// ignored and yields nil, matching the nil-tolerance of the other
// hello consumers in this package. Capabilities with a nil entry or an
// empty job type are dropped.
func (r *Registry) UpsertFromHello(hello *plugin_pb.WorkerHello) *WorkerSession {
	if hello == nil {
		return nil
	}
	now := time.Now()
	caps := make(map[string]*plugin_pb.JobTypeCapability, len(hello.Capabilities))
	for _, c := range hello.Capabilities {
		if c == nil || c.JobType == "" {
			continue
		}
		caps[c.JobType] = cloneJobTypeCapability(c)
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	session, ok := r.sessions[hello.WorkerId]
	if !ok {
		session = &WorkerSession{
			WorkerID:    hello.WorkerId,
			ConnectedAt: now,
		}
		r.sessions[hello.WorkerId] = session
	}
	// Metadata is refreshed on every hello so reconnects pick up new
	// versions/addresses; capabilities are fully replaced, not merged.
	session.WorkerInstance = hello.WorkerInstanceId
	session.Address = hello.Address
	session.WorkerVersion = hello.WorkerVersion
	session.ProtocolVersion = hello.ProtocolVersion
	session.LastSeenAt = now
	session.Capabilities = caps
	return cloneWorkerSession(session)
}
// Remove drops the tracked session for workerID, if any.
func (r *Registry) Remove(workerID string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.sessions, workerID)
}
// UpdateHeartbeat stores a clone of the latest heartbeat for a known worker
// and bumps its freshness timestamp. Heartbeats for unknown workers are
// silently dropped.
func (r *Registry) UpdateHeartbeat(workerID string, heartbeat *plugin_pb.WorkerHeartbeat) {
	r.mu.Lock()
	defer r.mu.Unlock()
	session, ok := r.sessions[workerID]
	if !ok {
		return
	}
	session.Heartbeat = cloneWorkerHeartbeat(heartbeat)
	session.LastSeenAt = time.Now()
}
// Get returns a snapshot of one worker session. Unknown or stale workers
// report not found.
func (r *Registry) Get(workerID string) (*WorkerSession, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	session, ok := r.sessions[workerID]
	if !ok {
		return nil, false
	}
	if r.isSessionStaleLocked(session, time.Now()) {
		return nil, false
	}
	return cloneWorkerSession(session), true
}
// List returns snapshots of all fresh worker sessions, sorted by worker ID.
func (r *Registry) List() []*WorkerSession {
	r.mu.RLock()
	defer r.mu.RUnlock()
	now := time.Now()
	sessions := make([]*WorkerSession, 0, len(r.sessions))
	for _, session := range r.sessions {
		if r.isSessionStaleLocked(session, now) {
			continue
		}
		sessions = append(sessions, cloneWorkerSession(session))
	}
	sort.Slice(sessions, func(a, b int) bool {
		return sessions[a].WorkerID < sessions[b].WorkerID
	})
	return sessions
}
// DetectableJobTypes returns sorted job types that currently have at least one detect-capable worker.
func (r *Registry) DetectableJobTypes() []string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	now := time.Now()
	seen := make(map[string]struct{})
	for _, session := range r.sessions {
		if r.isSessionStaleLocked(session, now) {
			continue
		}
		for jobType, capability := range session.Capabilities {
			if capability != nil && capability.CanDetect {
				seen[jobType] = struct{}{}
			}
		}
	}
	names := make([]string, 0, len(seen))
	for jobType := range seen {
		names = append(names, jobType)
	}
	sort.Strings(names)
	return names
}
// JobTypes returns sorted job types known by connected workers regardless of capability kind.
func (r *Registry) JobTypes() []string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	now := time.Now()
	seen := make(map[string]struct{})
	for _, session := range r.sessions {
		if r.isSessionStaleLocked(session, now) {
			continue
		}
		for jobType := range session.Capabilities {
			if jobType != "" {
				seen[jobType] = struct{}{}
			}
		}
	}
	names := make([]string, 0, len(seen))
	for jobType := range seen {
		names = append(names, jobType)
	}
	sort.Strings(names)
	return names
}
// PickSchemaProvider picks one worker for schema requests.
// Preference order:
// 1) workers that can detect this job type
// 2) workers that can execute this job type
// tie-break: more free slots, then lexical worker ID.
// Stale workers are skipped entirely.
func (r *Registry) PickSchemaProvider(jobType string) (*WorkerSession, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var candidates []*WorkerSession
	now := time.Now()
	for _, s := range r.sessions {
		if r.isSessionStaleLocked(s, now) {
			continue
		}
		capability := s.Capabilities[jobType]
		if capability == nil {
			continue
		}
		if capability.CanDetect || capability.CanExecute {
			candidates = append(candidates, s)
		}
	}
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no worker available for schema job_type=%s", jobType)
	}
	sort.Slice(candidates, func(i, j int) bool {
		a := candidates[i]
		b := candidates[j]
		ac := a.Capabilities[jobType]
		bc := b.Capabilities[jobType]
		// Prefer detect-capable providers first.
		if ac.CanDetect != bc.CanDetect {
			return ac.CanDetect
		}
		// Then prefer the worker with more combined free slots.
		aSlots := availableDetectionSlots(a, ac) + availableExecutionSlots(a, ac)
		bSlots := availableDetectionSlots(b, bc) + availableExecutionSlots(b, bc)
		if aSlots != bSlots {
			return aSlots > bSlots
		}
		// Finally, lexical worker ID for determinism.
		return a.WorkerID < b.WorkerID
	})
	// Clone so the caller can use the session outside the registry lock.
	return cloneWorkerSession(candidates[0]), nil
}
// PickDetector picks one detector worker for a job type. Stale workers are
// skipped; ties between equally loaded workers rotate round-robin.
func (r *Registry) PickDetector(jobType string) (*WorkerSession, error) {
	return r.pickByKind(jobType, true)
}
// PickExecutor picks one executor worker for a job type. Stale workers are
// skipped; ties between equally loaded workers rotate round-robin.
func (r *Registry) PickExecutor(jobType string) (*WorkerSession, error) {
	return r.pickByKind(jobType, false)
}
// ListExecutors returns sorted executor candidates for one job type.
// Ordering is by most available execution slots, then lexical worker ID.
// The top tie group is rotated round-robin to prevent sticky assignment.
// A write lock is required because the rotation advances cursor state.
func (r *Registry) ListExecutors(jobType string) ([]*WorkerSession, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	candidates := r.collectByKindLocked(jobType, false, time.Now())
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no executor worker available for job_type=%s", jobType)
	}
	sortByKind(candidates, jobType, false)
	r.rotateTopCandidatesLocked(candidates, jobType, false)
	// Clone every entry so callers never touch registry-owned sessions.
	out := make([]*WorkerSession, 0, len(candidates))
	for _, candidate := range candidates {
		out = append(out, cloneWorkerSession(candidate))
	}
	return out, nil
}
// pickByKind returns a snapshot of the best detector (detect=true) or
// executor (detect=false) candidate for one job type, rotating ties
// round-robin via the registry cursors.
func (r *Registry) pickByKind(jobType string, detect bool) (*WorkerSession, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	candidates := r.collectByKindLocked(jobType, detect, time.Now())
	if len(candidates) == 0 {
		if detect {
			return nil, fmt.Errorf("no detector worker available for job_type=%s", jobType)
		}
		return nil, fmt.Errorf("no executor worker available for job_type=%s", jobType)
	}
	sortByKind(candidates, jobType, detect)
	r.rotateTopCandidatesLocked(candidates, jobType, detect)
	return cloneWorkerSession(candidates[0]), nil
}
// collectByKindLocked gathers fresh sessions that advertise the requested
// capability (detect or execute) for one job type. Caller holds r.mu.
func (r *Registry) collectByKindLocked(jobType string, detect bool, now time.Time) []*WorkerSession {
	var candidates []*WorkerSession
	for _, session := range r.sessions {
		if r.isSessionStaleLocked(session, now) {
			continue
		}
		capability := session.Capabilities[jobType]
		if capability == nil {
			continue
		}
		wanted := capability.CanExecute
		if detect {
			wanted = capability.CanDetect
		}
		if wanted {
			candidates = append(candidates, session)
		}
	}
	return candidates
}
// isSessionStaleLocked reports whether a session has been silent longer than
// staleAfter. A nil session is stale; a non-positive staleAfter disables
// staleness; sessions with no timestamps at all are treated as fresh.
// Caller holds r.mu.
func (r *Registry) isSessionStaleLocked(session *WorkerSession, now time.Time) bool {
	if session == nil {
		return true
	}
	if r.staleAfter <= 0 {
		return false
	}
	reference := session.LastSeenAt
	if reference.IsZero() {
		// Fall back to the connect time for sessions that never reported in.
		reference = session.ConnectedAt
	}
	if reference.IsZero() {
		return false
	}
	return now.Sub(reference) > r.staleAfter
}
// sortByKind orders candidates by most free slots of the requested kind
// (detection when detect is true, execution otherwise), breaking ties by
// lexical worker ID for determinism.
func sortByKind(candidates []*WorkerSession, jobType string, detect bool) {
	sort.Slice(candidates, func(i, j int) bool {
		a := candidates[i]
		b := candidates[j]
		ac := a.Capabilities[jobType]
		bc := b.Capabilities[jobType]
		aSlots := availableSlotsByKind(a, ac, detect)
		bSlots := availableSlotsByKind(b, bc, detect)
		if aSlots != bSlots {
			return aSlots > bSlots
		}
		return a.WorkerID < b.WorkerID
	})
}
// rotateTopCandidatesLocked rotates the leading group of candidates that tie
// on free slots, using a per-job-type round-robin cursor, so repeated picks
// spread across equally loaded workers instead of always choosing the
// lexically smallest. Expects candidates already sorted by sortByKind.
// Caller holds r.mu (write).
func (r *Registry) rotateTopCandidatesLocked(candidates []*WorkerSession, jobType string, detect bool) {
	if len(candidates) < 2 {
		return
	}
	// Slot count of the current leader defines the tie group.
	capability := candidates[0].Capabilities[jobType]
	topSlots := availableSlotsByKind(candidates[0], capability, detect)
	// Grow tieEnd over the prefix matching the leader's slot count.
	tieEnd := 1
	for tieEnd < len(candidates) {
		nextCapability := candidates[tieEnd].Capabilities[jobType]
		if availableSlotsByKind(candidates[tieEnd], nextCapability, detect) != topSlots {
			break
		}
		tieEnd++
	}
	if tieEnd <= 1 {
		return
	}
	cursorKey := strings.TrimSpace(jobType)
	if cursorKey == "" {
		cursorKey = "*" // shared cursor bucket for blank job types
	}
	// Read and advance the cursor for this job type and kind.
	var offset int
	if detect {
		offset = r.detectorCursor[cursorKey] % tieEnd
		r.detectorCursor[cursorKey] = (offset + 1) % tieEnd
	} else {
		offset = r.executorCursor[cursorKey] % tieEnd
		r.executorCursor[cursorKey] = (offset + 1) % tieEnd
	}
	if offset == 0 {
		return
	}
	// Rotate only the tie prefix left by offset; the tail keeps its order.
	prefix := append([]*WorkerSession(nil), candidates[:tieEnd]...)
	for i := 0; i < tieEnd; i++ {
		candidates[i] = prefix[(i+offset)%tieEnd]
	}
}
// availableSlotsByKind returns the free detection or execution slots for one
// worker/capability pair, selected by the detect flag.
func availableSlotsByKind(
	session *WorkerSession,
	capability *plugin_pb.JobTypeCapability,
	detect bool,
) int {
	if !detect {
		return availableExecutionSlots(session, capability)
	}
	return availableDetectionSlots(session, capability)
}
// availableDetectionSlots reports how many detection slots a worker has free.
// Live heartbeat data wins when it reports a positive total; otherwise the
// advertised capability concurrency is used, defaulting to one slot.
// Nil session/capability are tolerated (internal helper; callers mostly
// guarantee non-nil, but a guard is cheaper than a panic).
func availableDetectionSlots(session *WorkerSession, capability *plugin_pb.JobTypeCapability) int {
	if session != nil && session.Heartbeat != nil && session.Heartbeat.DetectionSlotsTotal > 0 {
		free := int(session.Heartbeat.DetectionSlotsTotal - session.Heartbeat.DetectionSlotsUsed)
		if free < 0 {
			// Clamp: a heartbeat may report more used than total transiently.
			return 0
		}
		return free
	}
	if capability != nil && capability.MaxDetectionConcurrency > 0 {
		return int(capability.MaxDetectionConcurrency)
	}
	return 1
}
// availableExecutionSlots reports how many execution slots a worker has free.
// Live heartbeat data wins when it reports a positive total; otherwise the
// advertised capability concurrency is used, defaulting to one slot.
// Nil session/capability are tolerated (internal helper; callers mostly
// guarantee non-nil, but a guard is cheaper than a panic).
func availableExecutionSlots(session *WorkerSession, capability *plugin_pb.JobTypeCapability) int {
	if session != nil && session.Heartbeat != nil && session.Heartbeat.ExecutionSlotsTotal > 0 {
		free := int(session.Heartbeat.ExecutionSlotsTotal - session.Heartbeat.ExecutionSlotsUsed)
		if free < 0 {
			// Clamp: a heartbeat may report more used than total transiently.
			return 0
		}
		return free
	}
	if capability != nil && capability.MaxExecutionConcurrency > 0 {
		return int(capability.MaxExecutionConcurrency)
	}
	return 1
}
// cloneWorkerSession returns a copy of a session so callers can read it
// without holding the registry lock: the capability map and heartbeat are
// cloned, scalar fields are copied by value.
func cloneWorkerSession(in *WorkerSession) *WorkerSession {
	if in == nil {
		return nil
	}
	out := *in
	out.Capabilities = make(map[string]*plugin_pb.JobTypeCapability, len(in.Capabilities))
	// "capability" rather than "cap": avoids shadowing the builtin cap().
	for jobType, capability := range in.Capabilities {
		out.Capabilities[jobType] = cloneJobTypeCapability(capability)
	}
	out.Heartbeat = cloneWorkerHeartbeat(in.Heartbeat)
	return &out
}
// cloneJobTypeCapability returns a shallow copy of one capability message.
// NOTE(review): this copies a generated protobuf struct by value; if the
// message gains internal no-copy state, switch to proto.Clone — confirm
// against the generated plugin_pb code.
func cloneJobTypeCapability(in *plugin_pb.JobTypeCapability) *plugin_pb.JobTypeCapability {
	if in == nil {
		return nil
	}
	out := *in
	return &out
}
// cloneWorkerHeartbeat deep-copies a heartbeat so snapshots handed to callers
// cannot alias registry-held state: RunningWork entries and both maps are
// duplicated.
// NOTE(review): elements are generated protobuf structs copied by value; if
// they gain internal no-copy state, switch to proto.Clone — confirm.
func cloneWorkerHeartbeat(in *plugin_pb.WorkerHeartbeat) *plugin_pb.WorkerHeartbeat {
	if in == nil {
		return nil
	}
	out := *in
	if in.RunningWork != nil {
		out.RunningWork = make([]*plugin_pb.RunningWork, 0, len(in.RunningWork))
		for _, rw := range in.RunningWork {
			// Nil entries are dropped rather than cloned.
			if rw == nil {
				continue
			}
			clone := *rw
			out.RunningWork = append(out.RunningWork, &clone)
		}
	}
	if in.QueuedJobsByType != nil {
		out.QueuedJobsByType = make(map[string]int32, len(in.QueuedJobsByType))
		for k, v := range in.QueuedJobsByType {
			out.QueuedJobsByType[k] = v
		}
	}
	if in.Metadata != nil {
		out.Metadata = make(map[string]string, len(in.Metadata))
		for k, v := range in.Metadata {
			out.Metadata[k] = v
		}
	}
	return &out
}

View File

@@ -0,0 +1,321 @@
package plugin
import (
"reflect"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
)
// TestRegistryPickDetectorPrefersMoreFreeSlots verifies that detector
// selection prefers the worker whose heartbeat reports more free detection
// slots (worker-b: 4-1=3 free vs worker-a: 2-1=1 free).
func TestRegistryPickDetectorPrefersMoreFreeSlots(t *testing.T) {
	t.Parallel()
	r := NewRegistry()
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true, CanExecute: true, MaxDetectionConcurrency: 2, MaxExecutionConcurrency: 2},
		},
	})
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true, CanExecute: true, MaxDetectionConcurrency: 4, MaxExecutionConcurrency: 4},
		},
	})
	// Heartbeats override the advertised concurrency when totals are set.
	r.UpdateHeartbeat("worker-a", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-a",
		DetectionSlotsUsed:  1,
		DetectionSlotsTotal: 2,
	})
	r.UpdateHeartbeat("worker-b", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-b",
		DetectionSlotsUsed:  1,
		DetectionSlotsTotal: 4,
	})
	picked, err := r.PickDetector("vacuum")
	if err != nil {
		t.Fatalf("PickDetector: %v", err)
	}
	if picked.WorkerID != "worker-b" {
		t.Fatalf("unexpected detector picked: got %s want worker-b", picked.WorkerID)
	}
}
// TestRegistryPickExecutorAllowsSameWorker checks that a single worker that
// both detects and executes can be selected for both roles.
func TestRegistryPickExecutorAllowsSameWorker(t *testing.T) {
	t.Parallel()
	registry := NewRegistry()
	registry.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-x",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanDetect: true, CanExecute: true, MaxDetectionConcurrency: 1, MaxExecutionConcurrency: 1},
		},
	})
	detector, err := registry.PickDetector("balance")
	if err != nil {
		t.Fatalf("PickDetector: %v", err)
	}
	executor, err := registry.PickExecutor("balance")
	if err != nil {
		t.Fatalf("PickExecutor: %v", err)
	}
	if detector.WorkerID != "worker-x" || executor.WorkerID != "worker-x" {
		t.Fatalf("expected same worker for detect/execute, got detector=%s executor=%s", detector.WorkerID, executor.WorkerID)
	}
}
// TestRegistryDetectableJobTypes verifies that DetectableJobTypes reports
// only job types with at least one detect-capable worker, deduplicated and
// sorted ("balance" is execute-only, so it is excluded).
func TestRegistryDetectableJobTypes(t *testing.T) {
	t.Parallel()
	r := NewRegistry()
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true, CanExecute: true},
			{JobType: "balance", CanDetect: false, CanExecute: true},
		},
	})
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "ec", CanDetect: true, CanExecute: false},
			{JobType: "vacuum", CanDetect: true, CanExecute: false}, // dup of worker-a's vacuum
		},
	})
	got := r.DetectableJobTypes()
	want := []string{"ec", "vacuum"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("unexpected detectable job types: got=%v want=%v", got, want)
	}
}
// TestRegistryJobTypes checks that JobTypes merges every advertised job type
// across workers — regardless of capability kind — deduplicated and sorted.
func TestRegistryJobTypes(t *testing.T) {
	t.Parallel()
	registry := NewRegistry()
	hellos := []*plugin_pb.WorkerHello{
		{
			WorkerId: "worker-a",
			Capabilities: []*plugin_pb.JobTypeCapability{
				{JobType: "vacuum", CanDetect: true},
				{JobType: "balance", CanExecute: true},
			},
		},
		{
			WorkerId: "worker-b",
			Capabilities: []*plugin_pb.JobTypeCapability{
				{JobType: "ec", CanDetect: true},
			},
		},
	}
	for _, hello := range hellos {
		registry.UpsertFromHello(hello)
	}
	got := registry.JobTypes()
	want := []string{"balance", "ec", "vacuum"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("unexpected job types: got=%v want=%v", got, want)
	}
}
// TestRegistryListExecutorsSortedBySlots verifies that executor candidates
// are ordered by free execution slots from heartbeats (worker-b: 4-1=3 free
// sorts before worker-a: 2-1=1 free).
func TestRegistryListExecutorsSortedBySlots(t *testing.T) {
	t.Parallel()
	r := NewRegistry()
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 2},
		},
	})
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 4},
		},
	})
	r.UpdateHeartbeat("worker-a", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-a",
		ExecutionSlotsUsed:  1,
		ExecutionSlotsTotal: 2,
	})
	r.UpdateHeartbeat("worker-b", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-b",
		ExecutionSlotsUsed:  1,
		ExecutionSlotsTotal: 4,
	})
	executors, err := r.ListExecutors("balance")
	if err != nil {
		t.Fatalf("ListExecutors: %v", err)
	}
	if len(executors) != 2 {
		t.Fatalf("unexpected candidate count: got=%d", len(executors))
	}
	if executors[0].WorkerID != "worker-b" || executors[1].WorkerID != "worker-a" {
		t.Fatalf("unexpected executor order: got=%s,%s", executors[0].WorkerID, executors[1].WorkerID)
	}
}
// TestRegistryPickExecutorRoundRobinForTopTie verifies that three equally
// loaded executors are picked in rotating order across successive calls
// rather than always the lexically first.
func TestRegistryPickExecutorRoundRobinForTopTie(t *testing.T) {
	t.Parallel()
	r := NewRegistry()
	// All three workers tie: same capability, no heartbeats.
	for _, workerID := range []string{"worker-a", "worker-b", "worker-c"} {
		r.UpsertFromHello(&plugin_pb.WorkerHello{
			WorkerId: workerID,
			Capabilities: []*plugin_pb.JobTypeCapability{
				{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 1},
			},
		})
	}
	got := make([]string, 0, 6)
	for i := 0; i < 6; i++ {
		executor, err := r.PickExecutor("balance")
		if err != nil {
			t.Fatalf("PickExecutor: %v", err)
		}
		got = append(got, executor.WorkerID)
	}
	// Two full rotations through the tie group, in lexical base order.
	want := []string{"worker-a", "worker-b", "worker-c", "worker-a", "worker-b", "worker-c"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("unexpected pick order: got=%v want=%v", got, want)
	}
}
// TestRegistryListExecutorsRoundRobinForTopTie verifies that only the tied
// top group rotates between calls (worker-a/worker-b, 2 free slots each),
// while worker-c (1 free slot) always stays last.
func TestRegistryListExecutorsRoundRobinForTopTie(t *testing.T) {
	t.Parallel()
	r := NewRegistry()
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 2},
		},
	})
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-b",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 2},
		},
	})
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-c",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "balance", CanExecute: true, MaxExecutionConcurrency: 1},
		},
	})
	// Heartbeats pin the free-slot counts: a=2, b=2 (tie), c=1.
	r.UpdateHeartbeat("worker-a", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-a",
		ExecutionSlotsUsed:  0,
		ExecutionSlotsTotal: 2,
	})
	r.UpdateHeartbeat("worker-b", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-b",
		ExecutionSlotsUsed:  0,
		ExecutionSlotsTotal: 2,
	})
	r.UpdateHeartbeat("worker-c", &plugin_pb.WorkerHeartbeat{
		WorkerId:            "worker-c",
		ExecutionSlotsUsed:  0,
		ExecutionSlotsTotal: 1,
	})
	firstCall, err := r.ListExecutors("balance")
	if err != nil {
		t.Fatalf("ListExecutors first call: %v", err)
	}
	secondCall, err := r.ListExecutors("balance")
	if err != nil {
		t.Fatalf("ListExecutors second call: %v", err)
	}
	thirdCall, err := r.ListExecutors("balance")
	if err != nil {
		t.Fatalf("ListExecutors third call: %v", err)
	}
	// Calls 1 and 3 start at worker-a, call 2 at worker-b: a two-way rotation.
	if firstCall[0].WorkerID != "worker-a" || firstCall[1].WorkerID != "worker-b" || firstCall[2].WorkerID != "worker-c" {
		t.Fatalf("unexpected first executor order: got=%s,%s,%s", firstCall[0].WorkerID, firstCall[1].WorkerID, firstCall[2].WorkerID)
	}
	if secondCall[0].WorkerID != "worker-b" || secondCall[1].WorkerID != "worker-a" || secondCall[2].WorkerID != "worker-c" {
		t.Fatalf("unexpected second executor order: got=%s,%s,%s", secondCall[0].WorkerID, secondCall[1].WorkerID, secondCall[2].WorkerID)
	}
	if thirdCall[0].WorkerID != "worker-a" || thirdCall[1].WorkerID != "worker-b" || thirdCall[2].WorkerID != "worker-c" {
		t.Fatalf("unexpected third executor order: got=%s,%s,%s", thirdCall[0].WorkerID, thirdCall[1].WorkerID, thirdCall[2].WorkerID)
	}
}
// TestRegistrySkipsStaleWorkersForSelectionAndListing verifies that a worker
// past the stale cutoff is invisible to PickDetector, Get, and List, while a
// fresh worker remains visible everywhere.
func TestRegistrySkipsStaleWorkersForSelectionAndListing(t *testing.T) {
	t.Parallel()
	r := NewRegistry()
	r.staleAfter = 2 * time.Second
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-stale",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true, CanExecute: true},
		},
	})
	r.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-fresh",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true, CanExecute: true},
		},
	})
	// Backdate one worker past the 2s cutoff by poking the session directly.
	r.mu.Lock()
	r.sessions["worker-stale"].LastSeenAt = time.Now().Add(-10 * time.Second)
	r.sessions["worker-fresh"].LastSeenAt = time.Now()
	r.mu.Unlock()
	picked, err := r.PickDetector("vacuum")
	if err != nil {
		t.Fatalf("PickDetector: %v", err)
	}
	if picked.WorkerID != "worker-fresh" {
		t.Fatalf("unexpected detector: got=%s want=worker-fresh", picked.WorkerID)
	}
	if _, ok := r.Get("worker-stale"); ok {
		t.Fatalf("expected stale worker to be hidden from Get")
	}
	if _, ok := r.Get("worker-fresh"); !ok {
		t.Fatalf("expected fresh worker from Get")
	}
	listed := r.List()
	if len(listed) != 1 || listed[0].WorkerID != "worker-fresh" {
		t.Fatalf("unexpected listed workers: %+v", listed)
	}
}
// TestRegistryReturnsNoDetectorWhenAllWorkersStale verifies that PickDetector
// fails when every capable worker's heartbeat has aged past staleAfter.
func TestRegistryReturnsNoDetectorWhenAllWorkersStale(t *testing.T) {
	t.Parallel()

	reg := NewRegistry()
	reg.staleAfter = 2 * time.Second
	reg.UpsertFromHello(&plugin_pb.WorkerHello{
		WorkerId: "worker-a",
		Capabilities: []*plugin_pb.JobTypeCapability{
			{JobType: "vacuum", CanDetect: true},
		},
	})

	// Push the only worker's heartbeat well past the staleness window.
	reg.mu.Lock()
	reg.sessions["worker-a"].LastSeenAt = time.Now().Add(-10 * time.Second)
	reg.mu.Unlock()

	if _, err := reg.PickDetector("vacuum"); err == nil {
		t.Fatalf("expected no detector when all workers are stale")
	}
}

103
weed/admin/plugin/types.go Normal file
View File

@@ -0,0 +1,103 @@
package plugin
import "time"
// Retention limits for per-job-type run history (see JobTypeRunHistory).
const (
// Keep exactly the last 10 successful and last 10 error runs per job type.
MaxSuccessfulRunHistory = 10
MaxErrorRunHistory = 10
)
// RunOutcome classifies how a job run finished.
type RunOutcome string

const (
// RunOutcomeSuccess marks a run that completed without error.
RunOutcomeSuccess RunOutcome = "success"
// RunOutcomeError marks a run that ended with an error.
RunOutcomeError RunOutcome = "error"
)
// JobRunRecord captures the result of one completed job run: which job ran,
// on which worker, its outcome, and how long it took.
type JobRunRecord struct {
RunID string `json:"run_id"`
JobID string `json:"job_id"`
JobType string `json:"job_type"`
WorkerID string `json:"worker_id"`
// Outcome is RunOutcomeSuccess or RunOutcomeError.
Outcome RunOutcome `json:"outcome"`
Message string `json:"message,omitempty"`
DurationMs int64 `json:"duration_ms,omitempty"`
CompletedAt *time.Time `json:"completed_at,omitempty"`
}
// JobTypeRunHistory holds the retained run records for one job type, capped
// at MaxSuccessfulRunHistory / MaxErrorRunHistory entries respectively.
type JobTypeRunHistory struct {
JobType string `json:"job_type"`
SuccessfulRuns []JobRunRecord `json:"successful_runs"`
ErrorRuns []JobRunRecord `json:"error_runs"`
LastUpdatedTime *time.Time `json:"last_updated_time,omitempty"`
}
// TrackedJob is the monitor's view of a single dispatched job: its identity,
// request parameters, current state/progress, and (once finished) its result.
type TrackedJob struct {
JobID string `json:"job_id"`
JobType string `json:"job_type"`
RequestID string `json:"request_id"`
WorkerID string `json:"worker_id"`
// DedupeKey, when set, is used to drop duplicate scheduled proposals.
DedupeKey string `json:"dedupe_key,omitempty"`
Summary string `json:"summary,omitempty"`
Detail string `json:"detail,omitempty"`
Parameters map[string]interface{} `json:"parameters,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
State string `json:"state"`
// Progress is the worker-reported progress value (scale defined by the
// worker protocol — confirm against the plugin_pb contract).
Progress float64 `json:"progress"`
Stage string `json:"stage,omitempty"`
Message string `json:"message,omitempty"`
// Attempt counts retries of this job, when retries occur.
Attempt int32 `json:"attempt,omitempty"`
CreatedAt *time.Time `json:"created_at,omitempty"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
CompletedAt *time.Time `json:"completed_at,omitempty"`
// ErrorMessage and the Result* fields are populated on completion.
ErrorMessage string `json:"error_message,omitempty"`
ResultSummary string `json:"result_summary,omitempty"`
ResultOutputValues map[string]interface{} `json:"result_output_values,omitempty"`
}
// JobActivity is one timeline entry for a job: a message emitted by some
// source at a point in time, with optional structured details.
type JobActivity struct {
JobID string `json:"job_id"`
JobType string `json:"job_type"`
RequestID string `json:"request_id,omitempty"`
WorkerID string `json:"worker_id,omitempty"`
// Source identifies who emitted the entry (free-form string).
Source string `json:"source"`
Message string `json:"message"`
Stage string `json:"stage,omitempty"`
Details map[string]interface{} `json:"details,omitempty"`
OccurredAt *time.Time `json:"occurred_at,omitempty"`
}
// JobDetail aggregates the full picture of one job for detail views: the job
// itself, its run record (when finished), its activity timeline, and any
// related jobs.
type JobDetail struct {
Job *TrackedJob `json:"job"`
RunRecord *JobRunRecord `json:"run_record,omitempty"`
Activities []JobActivity `json:"activities"`
RelatedJobs []TrackedJob `json:"related_jobs,omitempty"`
LastUpdated *time.Time `json:"last_updated,omitempty"`
}
// SchedulerJobTypeState is a read-only snapshot of per-job-type scheduler
// state exposed for monitoring: effective policy settings, detection status,
// and worker availability.
type SchedulerJobTypeState struct {
JobType string `json:"job_type"`
Enabled bool `json:"enabled"`
// PolicyError describes a policy problem for this job type, if any.
PolicyError string `json:"policy_error,omitempty"`
DetectionInFlight bool `json:"detection_in_flight"`
NextDetectionAt *time.Time `json:"next_detection_at,omitempty"`
DetectionIntervalSeconds int32 `json:"detection_interval_seconds,omitempty"`
DetectionTimeoutSeconds int32 `json:"detection_timeout_seconds,omitempty"`
ExecutionTimeoutSeconds int32 `json:"execution_timeout_seconds,omitempty"`
MaxJobsPerDetection int32 `json:"max_jobs_per_detection,omitempty"`
// Concurrency limits applied globally and per worker when dispatching.
GlobalExecutionConcurrency int `json:"global_execution_concurrency,omitempty"`
PerWorkerExecutionConcurrency int `json:"per_worker_execution_concurrency,omitempty"`
RetryLimit int `json:"retry_limit,omitempty"`
RetryBackoffSeconds int32 `json:"retry_backoff_seconds,omitempty"`
// Worker availability as seen by the scheduler at snapshot time.
DetectorAvailable bool `json:"detector_available"`
DetectorWorkerID string `json:"detector_worker_id,omitempty"`
ExecutorWorkerCount int `json:"executor_worker_count"`
}
func timeToPtr(t time.Time) *time.Time {
if t.IsZero() {
return nil
}
return &t
}

View File

@@ -129,21 +129,6 @@ function setupSubmenuBehavior() {
} }
} }
// If we're on a maintenance page, expand the maintenance submenu
if (currentPath.startsWith('/maintenance')) {
const maintenanceSubmenu = document.getElementById('maintenanceSubmenu');
if (maintenanceSubmenu) {
maintenanceSubmenu.classList.add('show');
// Update the parent toggle button state
const toggleButton = document.querySelector('[data-bs-target="#maintenanceSubmenu"]');
if (toggleButton) {
toggleButton.classList.remove('collapsed');
toggleButton.setAttribute('aria-expanded', 'true');
}
}
}
// Prevent submenu from collapsing when clicking on submenu items // Prevent submenu from collapsing when clicking on submenu items
const clusterSubmenuLinks = document.querySelectorAll('#clusterSubmenu .nav-link'); const clusterSubmenuLinks = document.querySelectorAll('#clusterSubmenu .nav-link');
clusterSubmenuLinks.forEach(function (link) { clusterSubmenuLinks.forEach(function (link) {
@@ -161,14 +146,6 @@ function setupSubmenuBehavior() {
}); });
}); });
const maintenanceSubmenuLinks = document.querySelectorAll('#maintenanceSubmenu .nav-link');
maintenanceSubmenuLinks.forEach(function (link) {
link.addEventListener('click', function (e) {
// Don't prevent the navigation, just stop the collapse behavior
e.stopPropagation();
});
});
// Handle the main cluster toggle // Handle the main cluster toggle
const clusterToggle = document.querySelector('[data-bs-target="#clusterSubmenu"]'); const clusterToggle = document.querySelector('[data-bs-target="#clusterSubmenu"]');
if (clusterToggle) { if (clusterToggle) {
@@ -215,28 +192,6 @@ function setupSubmenuBehavior() {
}); });
} }
// Handle the main maintenance toggle
const maintenanceToggle = document.querySelector('[data-bs-target="#maintenanceSubmenu"]');
if (maintenanceToggle) {
maintenanceToggle.addEventListener('click', function (e) {
e.preventDefault();
const submenu = document.getElementById('maintenanceSubmenu');
const isExpanded = submenu.classList.contains('show');
if (isExpanded) {
// Collapse
submenu.classList.remove('show');
this.classList.add('collapsed');
this.setAttribute('aria-expanded', 'false');
} else {
// Expand
submenu.classList.add('show');
this.classList.remove('collapsed');
this.setAttribute('aria-expanded', 'true');
}
});
}
} }
// Loading indicator functions // Loading indicator functions

View File

@@ -238,7 +238,7 @@ func (at *ActiveTopology) getPlanningCapacityUnsafe(disk *activeDisk) StorageSlo
func (at *ActiveTopology) isDiskAvailableForPlanning(disk *activeDisk, taskType TaskType) bool { func (at *ActiveTopology) isDiskAvailableForPlanning(disk *activeDisk, taskType TaskType) bool {
// Check total load including pending tasks // Check total load including pending tasks
totalLoad := len(disk.pendingTasks) + len(disk.assignedTasks) totalLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
if totalLoad >= MaxTotalTaskLoadPerDisk { if MaxTotalTaskLoadPerDisk > 0 && totalLoad >= MaxTotalTaskLoadPerDisk {
return false return false
} }
@@ -299,6 +299,16 @@ func (at *ActiveTopology) getEffectiveAvailableCapacityUnsafe(disk *activeDisk)
} }
baseAvailable := disk.DiskInfo.DiskInfo.MaxVolumeCount - disk.DiskInfo.DiskInfo.VolumeCount baseAvailable := disk.DiskInfo.DiskInfo.MaxVolumeCount - disk.DiskInfo.DiskInfo.VolumeCount
if baseAvailable <= 0 &&
disk.DiskInfo.DiskInfo.MaxVolumeCount == 0 &&
disk.DiskInfo.DiskInfo.VolumeCount == 0 &&
len(disk.DiskInfo.DiskInfo.VolumeInfos) == 0 &&
len(disk.DiskInfo.DiskInfo.EcShardInfos) == 0 {
// Some empty volume servers can report max_volume_counts=0 before
// publishing concrete slot limits. Keep one provisional slot so EC
// detection still sees the disk for placement planning.
baseAvailable = 1
}
netImpact := at.getEffectiveCapacityUnsafe(disk) netImpact := at.getEffectiveCapacityUnsafe(disk)
// Calculate available volume slots (negative impact reduces availability) // Calculate available volume slots (negative impact reduces availability)

View File

@@ -0,0 +1,82 @@
package topology
import (
"fmt"
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// TestGetDisksWithEffectiveCapacityNotCappedAtTenByLoad ensures that, with
// hard load capping disabled, a disk stays eligible for EC placement even
// after accumulating far more than ten pending tasks, and that its reported
// load reflects every pending task.
func TestGetDisksWithEffectiveCapacityNotCappedAtTenByLoad(t *testing.T) {
	t.Parallel()

	topo := NewActiveTopology(0)
	if err := topo.UpdateTopology(singleDiskTopologyInfoForCapacityTest()); err != nil {
		t.Fatalf("UpdateTopology: %v", err)
	}

	// Queue many more tasks than the historical cap of 10 on the single disk.
	const pendingTasks = 32
	for i := 0; i < pendingTasks; i++ {
		taskID := fmt.Sprintf("ec-capacity-%d", i)
		spec := TaskSpec{
			TaskID:     taskID,
			TaskType:   TaskTypeErasureCoding,
			VolumeID:   uint32(i + 1),
			VolumeSize: 1,
			Sources: []TaskSourceSpec{
				{ServerID: "node-a", DiskID: 0, StorageImpact: &StorageSlotChange{}},
			},
			Destinations: []TaskDestinationSpec{
				{ServerID: "node-a", DiskID: 0, StorageImpact: &StorageSlotChange{}},
			},
		}
		if err := topo.AddPendingTask(spec); err != nil {
			t.Fatalf("AddPendingTask(%s): %v", taskID, err)
		}
	}

	disks := topo.GetDisksWithEffectiveCapacity(TaskTypeErasureCoding, "", 1)
	if len(disks) != 1 {
		t.Fatalf("expected disk to remain available after %d pending tasks, got %d", pendingTasks, len(disks))
	}
	if disks[0].LoadCount != pendingTasks {
		t.Fatalf("unexpected load count: got=%d want=%d", disks[0].LoadCount, pendingTasks)
	}
}
// singleDiskTopologyInfoForCapacityTest builds a minimal topology fixture:
// one data center, one rack, and a single data node ("node-a") with one empty
// hdd disk that has 1000 volume slots.
func singleDiskTopologyInfoForCapacityTest() *master_pb.TopologyInfo {
	disk := &master_pb.DiskInfo{
		DiskId:         0,
		Type:           "hdd",
		VolumeCount:    0,
		MaxVolumeCount: 1000,
	}
	node := &master_pb.DataNodeInfo{
		Id:        "node-a",
		DiskInfos: map[string]*master_pb.DiskInfo{"hdd": disk},
	}
	rack := &master_pb.RackInfo{
		Id:            "rack1",
		DataNodeInfos: []*master_pb.DataNodeInfo{node},
	}
	dc := &master_pb.DataCenterInfo{
		Id:        "dc1",
		RackInfos: []*master_pb.RackInfo{rack},
	}
	return &master_pb.TopologyInfo{
		Id:              "topology-test",
		DataCenterInfos: []*master_pb.DataCenterInfo{dc},
	}
}

View File

@@ -68,7 +68,7 @@ func (at *ActiveTopology) assignTaskToDisk(task *taskState) {
func (at *ActiveTopology) isDiskAvailable(disk *activeDisk, taskType TaskType) bool { func (at *ActiveTopology) isDiskAvailable(disk *activeDisk, taskType TaskType) bool {
// Check if disk has too many pending and active tasks // Check if disk has too many pending and active tasks
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks) activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
if activeLoad >= MaxConcurrentTasksPerDisk { if MaxConcurrentTasksPerDisk > 0 && activeLoad >= MaxConcurrentTasksPerDisk {
return false return false
} }

View File

@@ -317,6 +317,60 @@ func TestStorageSlotChangeCapacityCalculation(t *testing.T) {
assert.Equal(t, int32(0), reservedShard, "Should show 0 reserved shard slots") assert.Equal(t, int32(0), reservedShard, "Should show 0 reserved shard slots")
} }
// TestGetDisksWithEffectiveCapacity_UnknownEmptyDiskFallback verifies the
// provisional-capacity fallback: a disk reporting MaxVolumeCount=0 is treated
// as having one available slot only while it is completely empty; once it
// already holds a volume it reports zero effective capacity.
func TestGetDisksWithEffectiveCapacity_UnknownEmptyDiskFallback(t *testing.T) {
activeTopology := NewActiveTopology(10)
// Two nodes with unknown (0) max capacity: one empty, one already holding a volume.
topologyInfo := &master_pb.TopologyInfo{
DataCenterInfos: []*master_pb.DataCenterInfo{
{
Id: "dc1",
RackInfos: []*master_pb.RackInfo{
{
Id: "rack1",
DataNodeInfos: []*master_pb.DataNodeInfo{
{
Id: "empty-node",
DiskInfos: map[string]*master_pb.DiskInfo{
"hdd": {
DiskId: 0,
Type: "hdd",
VolumeCount: 0,
MaxVolumeCount: 0,
},
},
},
{
Id: "used-node",
DiskInfos: map[string]*master_pb.DiskInfo{
"hdd": {
DiskId: 0,
Type: "hdd",
VolumeCount: 1,
MaxVolumeCount: 0,
},
},
},
},
},
},
},
},
}
err := activeTopology.UpdateTopology(topologyInfo)
assert.NoError(t, err)
// Only the empty unknown-capacity disk should be offered for EC placement.
available := activeTopology.GetDisksWithEffectiveCapacity(TaskTypeErasureCoding, "", 1)
assert.Len(t, available, 1, "only the empty unknown-capacity disk should be treated as provisionally available")
if len(available) == 1 {
assert.Equal(t, "empty-node", available[0].NodeID)
assert.Equal(t, uint32(0), available[0].DiskID)
}
// The fallback grants exactly one provisional slot to the empty disk; the
// used disk stays at zero.
assert.Equal(t, int64(1), activeTopology.GetEffectiveAvailableCapacity("empty-node", 0))
assert.Equal(t, int64(0), activeTopology.GetEffectiveAvailableCapacity("used-node", 0))
}
// TestECMultipleTargets demonstrates proper handling of EC operations with multiple targets // TestECMultipleTargets demonstrates proper handling of EC operations with multiple targets
func TestECMultipleTargets(t *testing.T) { func TestECMultipleTargets(t *testing.T) {
activeTopology := NewActiveTopology(10) activeTopology := NewActiveTopology(10)

View File

@@ -26,17 +26,17 @@ const (
// Task and capacity management configuration constants // Task and capacity management configuration constants
const ( const (
// MaxConcurrentTasksPerDisk defines the maximum number of concurrent tasks per disk // MaxConcurrentTasksPerDisk defines the maximum number of pending+assigned tasks per disk.
// This prevents overloading a single disk with too many simultaneous operations // Set to 0 to disable hard load capping and rely on effective capacity checks.
MaxConcurrentTasksPerDisk = 10 MaxConcurrentTasksPerDisk = 0
// MaxTotalTaskLoadPerDisk defines the maximum total task load (pending + active) per disk // MaxTotalTaskLoadPerDisk defines the maximum total planning load (pending + active) per disk.
// This allows more tasks to be queued but limits the total pipeline depth // Set to 0 to disable hard load capping for planning.
MaxTotalTaskLoadPerDisk = 20 MaxTotalTaskLoadPerDisk = 0
// MaxTaskLoadForECPlacement defines the maximum task load to consider a disk for EC placement // MaxTaskLoadForECPlacement defines the maximum task load to consider a disk for EC placement.
// This threshold ensures disks aren't overloaded when planning EC operations // Set to 0 to disable this filter.
MaxTaskLoadForECPlacement = 10 MaxTaskLoadForECPlacement = 0
) )
// StorageSlotChange represents storage impact at both volume and shard levels // StorageSlotChange represents storage impact at both volume and shard levels

View File

@@ -3,6 +3,7 @@ package app
import ( import (
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/dash" "github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
) )
templ EcVolumeDetails(data dash.EcVolumeDetailsData) { templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
@@ -61,11 +62,11 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
<td> <td>
if data.IsComplete { if data.IsComplete {
<span class="badge bg-success"> <span class="badge bg-success">
<i class="fas fa-check me-1"></i>Complete ({data.TotalShards}/14 shards) <i class="fas fa-check me-1"></i>Complete ({data.TotalShards}/{fmt.Sprintf("%d", erasure_coding.TotalShardsCount)} shards)
</span> </span>
} else { } else {
<span class="badge bg-warning"> <span class="badge bg-warning">
<i class="fas fa-exclamation-triangle me-1"></i>Incomplete ({data.TotalShards}/14 shards) <i class="fas fa-exclamation-triangle me-1"></i>Incomplete ({data.TotalShards}/{fmt.Sprintf("%d", erasure_coding.TotalShardsCount)} shards)
</span> </span>
} }
</td> </td>
@@ -78,7 +79,7 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
if i > 0 { if i > 0 {
<span>, </span> <span>, </span>
} }
<span class="badge bg-danger">{fmt.Sprintf("%02d", shardID)}</span> @renderEcShardBadge(uint32(shardID), true)
} }
</td> </td>
</tr> </tr>
@@ -145,14 +146,19 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
<h6>Present Shards:</h6> <h6>Present Shards:</h6>
<div class="d-flex flex-wrap gap-1"> <div class="d-flex flex-wrap gap-1">
for _, shard := range data.Shards { for _, shard := range data.Shards {
<span class="badge bg-success me-1 mb-1">{fmt.Sprintf("%02d", shard.ShardID)}</span> @renderEcShardBadge(shard.ShardID, false)
} }
</div> </div>
<div class="small text-muted mt-2">
<span class="badge bg-primary me-1">Data</span>
<span class="badge bg-warning text-dark me-2">Parity</span>
Data shards are blue, parity shards are yellow.
</div>
if len(data.MissingShards) > 0 { if len(data.MissingShards) > 0 {
<h6 class="mt-2">Missing Shards:</h6> <h6 class="mt-2">Missing Shards:</h6>
<div class="d-flex flex-wrap gap-1"> <div class="d-flex flex-wrap gap-1">
for _, shardID := range data.MissingShards { for _, shardID := range data.MissingShards {
<span class="badge bg-secondary me-1 mb-1">{fmt.Sprintf("%02d", shardID)}</span> @renderEcShardBadge(uint32(shardID), true)
} }
</div> </div>
} }
@@ -240,7 +246,7 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
for _, shard := range data.Shards { for _, shard := range data.Shards {
<tr> <tr>
<td> <td>
<span class="badge bg-primary">{fmt.Sprintf("%02d", shard.ShardID)}</span> @renderEcShardBadge(shard.ShardID, false)
</td> </td>
<td> <td>
<a href={ templ.URL("/cluster/volume-servers/" + shard.Server) } class="text-primary text-decoration-none"> <a href={ templ.URL("/cluster/volume-servers/" + shard.Server) } class="text-primary text-decoration-none">
@@ -260,7 +266,7 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
<span class="text-success">{bytesToHumanReadableUint64(shard.Size)}</span> <span class="text-success">{bytesToHumanReadableUint64(shard.Size)}</span>
</td> </td>
<td> <td>
<a href={ templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", shard.Server)) } target="_blank" class="btn btn-sm btn-primary"> <a href={ templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", shard.Server)) } target="_blank" rel="noopener noreferrer" class="btn btn-sm btn-primary">
<i class="fas fa-external-link-alt me-1"></i>Volume Server <i class="fas fa-external-link-alt me-1"></i>Volume Server
</a> </a>
</td> </td>
@@ -298,6 +304,22 @@ templ EcVolumeDetails(data dash.EcVolumeDetailsData) {
</script> </script>
} }
templ renderEcShardBadge(shardID uint32, missing bool) {
if shardID < erasure_coding.DataShardsCount {
if missing {
<span class="badge bg-primary opacity-50 me-1 mb-1" title={ fmt.Sprintf("Missing data shard %d", shardID) }>{ fmt.Sprintf("D%02d", shardID) }</span>
} else {
<span class="badge bg-primary me-1 mb-1" title={ fmt.Sprintf("Data shard %d", shardID) }>{ fmt.Sprintf("D%02d", shardID) }</span>
}
} else {
if missing {
<span class="badge bg-warning text-dark opacity-50 me-1 mb-1" title={ fmt.Sprintf("Missing parity shard %d", shardID) }>{ fmt.Sprintf("P%02d", shardID) }</span>
} else {
<span class="badge bg-warning text-dark me-1 mb-1" title={ fmt.Sprintf("Parity shard %d", shardID) }>{ fmt.Sprintf("P%02d", shardID) }</span>
}
}
}
// Helper function to convert bytes to human readable format (uint64 version) // Helper function to convert bytes to human readable format (uint64 version)
func bytesToHumanReadableUint64(bytes uint64) string { func bytesToHumanReadableUint64(bytes uint64) string {
const unit = 1024 const unit = 1024
@@ -310,4 +332,4 @@ func bytesToHumanReadableUint64(bytes uint64) string {
exp++ exp++
} }
return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp]) return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp])
} }

View File

@@ -11,6 +11,7 @@ import templruntime "github.com/a-h/templ/runtime"
import ( import (
"fmt" "fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/dash" "github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
) )
func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component { func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
@@ -41,7 +42,7 @@ func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
var templ_7745c5c3_Var2 string var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID)) templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 18, Col: 115} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 19, Col: 115}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -54,7 +55,7 @@ func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
var templ_7745c5c3_Var3 string var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID)) templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 47, Col: 65} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 48, Col: 65}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -72,7 +73,7 @@ func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
var templ_7745c5c3_Var4 string var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection) templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 53, Col: 80} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 54, Col: 80}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -100,445 +101,585 @@ func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component {
var templ_7745c5c3_Var5 string var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards) templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 64, Col: 100} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 65, Col: 100}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "/14 shards)</span>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "/")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i>Incomplete (")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var6 string var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards) templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.TotalShardsCount))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 68, Col: 117} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 65, Col: 153}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "/14 shards)</span>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " shards)</span>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</td></tr>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "<span class=\"badge bg-warning\"><i class=\"fas fa-exclamation-triangle me-1\"></i>Incomplete (")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if !data.IsComplete {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "<tr><td><strong>Missing Shards:</strong></td><td>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
for i, shardID := range data.MissingShards { var templ_7745c5c3_Var7 string
if i > 0 { templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<span>, </span>") if templ_7745c5c3_Err != nil {
if templ_7745c5c3_Err != nil { return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 69, Col: 117}
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " <span class=\"badge bg-danger\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 81, Col: 99}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "</td></tr>") _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "/")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "<tr><td><strong>Data Centers:</strong></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for i, dc := range data.DataCenters {
if i > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<span>, </span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " <span class=\"badge bg-primary\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var8 string var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(dc) templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.TotalShardsCount))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 93, Col: 70} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 69, Col: 170}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</span>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, " shards)</span>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</td></tr><tr><td><strong>Servers:</strong></td><td><span class=\"text-muted\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</td></tr>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var9 string if !data.IsComplete {
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d servers", len(data.Servers))) templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "<tr><td><strong>Missing Shards:</strong></td><td>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 100, Col: 102} return templ_7745c5c3_Err
}
for i, shardID := range data.MissingShards {
if i > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<span>, </span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = renderEcShardBadge(uint32(shardID), true).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<tr><td><strong>Data Centers:</strong></td><td>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</span></td></tr><tr><td><strong>Last Updated:</strong></td><td><span class=\"text-muted\">") for i, dc := range data.DataCenters {
if i > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "<span>, </span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " <span class=\"badge bg-primary\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(dc)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 94, Col: 70}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</td></tr><tr><td><strong>Servers:</strong></td><td><span class=\"text-muted\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var10 string var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d servers", len(data.Servers)))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 106, Col: 104} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 101, Col: 102}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</span></td></tr></table></div></div></div><div class=\"col-md-6\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-chart-pie me-2\"></i>Shard Distribution</h5></div><div class=\"card-body\"><div class=\"row text-center\"><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-primary mb-1\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</span></td></tr><tr><td><strong>Last Updated:</strong></td><td><span class=\"text-muted\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var11 string var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards)) templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 125, Col: 98} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 107, Col: 104}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</h3><small class=\"text-muted\">Total Shards</small></div></div><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-success mb-1\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</span></td></tr></table></div></div></div><div class=\"col-md-6\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-chart-pie me-2\"></i>Shard Distribution</h5></div><div class=\"card-body\"><div class=\"row text-center\"><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-primary mb-1\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var12 string var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.DataCenters))) templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 131, Col: 103} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 126, Col: 98}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "</h3><small class=\"text-muted\">Data Centers</small></div></div><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-info mb-1\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "</h3><small class=\"text-muted\">Total Shards</small></div></div><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-success mb-1\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var13 string var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Servers))) templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.DataCenters)))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 137, Col: 96} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 132, Col: 103}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</h3><small class=\"text-muted\">Servers</small></div></div></div><!-- Shard Distribution Visualization --><div class=\"mt-3\"><h6>Present Shards:</h6><div class=\"d-flex flex-wrap gap-1\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</h3><small class=\"text-muted\">Data Centers</small></div></div><div class=\"col-4\"><div class=\"border rounded p-3\"><h3 class=\"text-info mb-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Servers)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 138, Col: 96}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "</h3><small class=\"text-muted\">Servers</small></div></div></div><!-- Shard Distribution Visualization --><div class=\"mt-3\"><h6>Present Shards:</h6><div class=\"d-flex flex-wrap gap-1\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
for _, shard := range data.Shards { for _, shard := range data.Shards {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<span class=\"badge bg-success me-1 mb-1\">") templ_7745c5c3_Err = renderEcShardBadge(shard.ShardID, false).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shard.ShardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 148, Col: 108}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</span>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</div>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</div><div class=\"small text-muted mt-2\"><span class=\"badge bg-primary me-1\">Data</span> <span class=\"badge bg-warning text-dark me-2\">Parity</span> Data shards are blue, parity shards are yellow.</div>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if len(data.MissingShards) > 0 { if len(data.MissingShards) > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<h6 class=\"mt-2\">Missing Shards:</h6><div class=\"d-flex flex-wrap gap-1\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<h6 class=\"mt-2\">Missing Shards:</h6><div class=\"d-flex flex-wrap gap-1\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
for _, shardID := range data.MissingShards { for _, shardID := range data.MissingShards {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<span class=\"badge bg-secondary me-1 mb-1\">") templ_7745c5c3_Err = renderEcShardBadge(uint32(shardID), true).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 155, Col: 108}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</span>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</div>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</div>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</div></div></div></div></div><!-- Shard Details Table --><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-list me-2\"></i>Shard Details</h5></div><div class=\"card-body\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "</div></div></div></div></div><!-- Shard Details Table --><div class=\"card\"><div class=\"card-header\"><h5 class=\"card-title mb-0\"><i class=\"fas fa-list me-2\"></i>Shard Details</h5></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if len(data.Shards) > 0 { if len(data.Shards) > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<div class=\"table-responsive\"><table class=\"table table-striped table-hover\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('shard_id')\" class=\"text-dark text-decoration-none\">Shard ID ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "<div class=\"table-responsive\"><table class=\"table table-striped table-hover\"><thead><tr><th><a href=\"#\" onclick=\"sortBy('shard_id')\" class=\"text-dark text-decoration-none\">Shard ID ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if data.SortBy == "shard_id" { if data.SortBy == "shard_id" {
if data.SortOrder == "asc" { if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<i class=\"fas fa-sort-up ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<i class=\"fas fa-sort-down ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<i class=\"fas fa-sort ms-1 text-muted\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</a></th><th><a href=\"#\" onclick=\"sortBy('server')\" class=\"text-dark text-decoration-none\">Server ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "</a></th><th><a href=\"#\" onclick=\"sortBy('server')\" class=\"text-dark text-decoration-none\">Server ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if data.SortBy == "server" { if data.SortBy == "server" {
if data.SortOrder == "asc" { if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "<i class=\"fas fa-sort-up ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<i class=\"fas fa-sort-down ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "<i class=\"fas fa-sort ms-1 text-muted\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</a></th><th><a href=\"#\" onclick=\"sortBy('data_center')\" class=\"text-dark text-decoration-none\">Data Center ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "</a></th><th><a href=\"#\" onclick=\"sortBy('data_center')\" class=\"text-dark text-decoration-none\">Data Center ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if data.SortBy == "data_center" { if data.SortBy == "data_center" {
if data.SortOrder == "asc" { if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<i class=\"fas fa-sort-up ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<i class=\"fas fa-sort-down ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<i class=\"fas fa-sort ms-1 text-muted\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</a></th><th><a href=\"#\" onclick=\"sortBy('rack')\" class=\"text-dark text-decoration-none\">Rack ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</a></th><th><a href=\"#\" onclick=\"sortBy('rack')\" class=\"text-dark text-decoration-none\">Rack ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if data.SortBy == "rack" { if data.SortBy == "rack" {
if data.SortOrder == "asc" { if data.SortOrder == "asc" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<i class=\"fas fa-sort-up ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<i class=\"fas fa-sort-up ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<i class=\"fas fa-sort-down ms-1\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<i class=\"fas fa-sort-down ms-1\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "<i class=\"fas fa-sort ms-1 text-muted\"></i>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<i class=\"fas fa-sort ms-1 text-muted\"></i>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</a></th><th class=\"text-dark\">Disk Type</th><th class=\"text-dark\">Shard Size</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</a></th><th class=\"text-dark\">Disk Type</th><th class=\"text-dark\">Shard Size</th><th class=\"text-dark\">Actions</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
for _, shard := range data.Shards { for _, shard := range data.Shards {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<tr><td><span class=\"badge bg-primary\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = renderEcShardBadge(shard.ShardID, false).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</td><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 templ.SafeURL
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinURLErrs(templ.URL("/cluster/volume-servers/" + shard.Server))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 252, Col: 106}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "\" class=\"text-primary text-decoration-none\"><code class=\"small\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var16 string var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shard.ShardID)) templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Server)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 243, Col: 110} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 253, Col: 81}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</span></td><td><a href=\"") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</code></a></td><td><span class=\"badge bg-primary text-white\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var17 templ.SafeURL var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinURLErrs(templ.URL("/cluster/volume-servers/" + shard.Server)) templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DataCenter)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 246, Col: 106} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 257, Col: 103}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "\" class=\"text-primary text-decoration-none\"><code class=\"small\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</span></td><td><span class=\"badge bg-secondary text-white\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var18 string var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Server) templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Rack)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 247, Col: 81} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 260, Col: 99}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</code></a></td><td><span class=\"badge bg-primary text-white\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</span></td><td><span class=\"text-dark\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var19 string var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DataCenter) templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DiskType)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 251, Col: 103} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 263, Col: 83}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</span></td><td><span class=\"badge bg-secondary text-white\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</span></td><td><span class=\"text-success\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var20 string var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Rack) templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(bytesToHumanReadableUint64(shard.Size))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 254, Col: 99} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 266, Col: 110}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</span></td><td><span class=\"text-dark\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</span></td><td><a href=\"")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var21 string var templ_7745c5c3_Var21 templ.SafeURL
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DiskType) templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", shard.Server)))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 257, Col: 83} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 269, Col: 121}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</span></td><td><span class=\"text-success\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "\" target=\"_blank\" rel=\"noopener noreferrer\" class=\"btn btn-sm btn-primary\"><i class=\"fas fa-external-link-alt me-1\"></i>Volume Server</a></td></tr>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var22 string }
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(bytesToHumanReadableUint64(shard.Size)) templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</tbody></table></div>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 260, Col: 110} return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "<div class=\"text-center py-4\"><i class=\"fas fa-exclamation-triangle fa-3x text-warning mb-3\"></i><h5>No EC shards found</h5><p class=\"text-muted\">This volume may not be EC encoded yet.</p></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "</div></div><script>\n // Sorting functionality\n function sortBy(field) {\n const currentSort = new URLSearchParams(window.location.search).get('sort_by');\n const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';\n \n let newOrder = 'asc';\n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n const url = new URL(window.location);\n url.searchParams.set('sort_by', field);\n url.searchParams.set('sort_order', newOrder);\n window.location.href = url.toString();\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
func renderEcShardBadge(shardID uint32, missing bool) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) }()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var22 := templ.GetChildren(ctx)
if templ_7745c5c3_Var22 == nil {
templ_7745c5c3_Var22 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
if shardID < erasure_coding.DataShardsCount {
if missing {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<span class=\"badge bg-primary opacity-50 me-1 mb-1\" title=\"")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</span></td><td><a href=\"") var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("Missing data shard %d", shardID))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 310, Col: 108}
}
var templ_7745c5c3_Var23 templ.SafeURL
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", shard.Server)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 263, Col: 121}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "\" target=\"_blank\" class=\"btn btn-sm btn-primary\"><i class=\"fas fa-external-link-alt me-1\"></i>Volume Server</a></td></tr>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("D%02d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 310, Col: 142}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "<span class=\"badge bg-primary me-1 mb-1\" title=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("Data shard %d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 312, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("D%02d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 312, Col: 123}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "</span>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "</tbody></table></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "<div class=\"text-center py-4\"><i class=\"fas fa-exclamation-triangle fa-3x text-warning mb-3\"></i><h5>No EC shards found</h5><p class=\"text-muted\">This volume may not be EC encoded yet.</p></div>") if missing {
if templ_7745c5c3_Err != nil { templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "<span class=\"badge bg-warning text-dark opacity-50 me-1 mb-1\" title=\"")
return templ_7745c5c3_Err if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("Missing parity shard %d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 316, Col: 120}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("P%02d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 316, Col: 154}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<span class=\"badge bg-warning text-dark me-1 mb-1\" title=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("Parity shard %d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 318, Col: 101}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var30 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("P%02d", shardID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/ec_volume_details.templ`, Line: 318, Col: 135}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "</div></div><script>\n // Sorting functionality\n function sortBy(field) {\n const currentSort = new URLSearchParams(window.location.search).get('sort_by');\n const currentOrder = new URLSearchParams(window.location.search).get('sort_order') || 'asc';\n \n let newOrder = 'asc';\n if (currentSort === field && currentOrder === 'asc') {\n newOrder = 'desc';\n }\n \n const url = new URL(window.location);\n url.searchParams.set('sort_by', field);\n url.searchParams.set('sort_order', newOrder);\n window.location.href = url.toString();\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil return nil
}) })
} }

View File

@@ -1,267 +0,0 @@
package app
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
)
// MaintenanceConfig renders the maintenance system configuration page:
// a global system-settings form (scan interval, timeouts, retries,
// concurrency limit, retention), links to per-task configuration pages,
// and a system statistics overview. The embedded script serializes the
// form (converting minutes/hours/days back to seconds) and PUTs it to
// /api/maintenance/config; reset only restores form defaults client-side.
templ MaintenanceConfig(data *maintenance.MaintenanceConfigData) {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h2 class="mb-0">
<i class="fas fa-cog me-2"></i>
Maintenance Configuration
</h2>
<div class="btn-group">
<a href="/maintenance" class="btn btn-outline-secondary">
<i class="fas fa-arrow-left me-1"></i>
Back to Queue
</a>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">System Settings</h5>
</div>
<div class="card-body">
<form>
<div class="mb-3">
<div class="form-check form-switch">
<input class="form-check-input" type="checkbox" id="enabled" checked?={data.IsEnabled}>
<label class="form-check-label" for="enabled">
<strong>Enable Maintenance System</strong>
</label>
</div>
<small class="form-text text-muted">
When enabled, the system will automatically scan for and execute maintenance tasks.
</small>
</div>
<div class="mb-3">
<label for="scanInterval" class="form-label">Scan Interval (minutes)</label>
<input type="number" class="form-control" id="scanInterval"
value={fmt.Sprintf("%.0f", float64(data.Config.ScanIntervalSeconds)/60)}
placeholder="30 (default)" min="1" max="1440">
<small class="form-text text-muted">
How often to scan for maintenance tasks (1-1440 minutes). <strong>Default: 30 minutes</strong>
</small>
</div>
<div class="mb-3">
<label for="workerTimeout" class="form-label">Worker Timeout (minutes)</label>
<input type="number" class="form-control" id="workerTimeout"
value={fmt.Sprintf("%.0f", float64(data.Config.WorkerTimeoutSeconds)/60)}
placeholder="5 (default)" min="1" max="60">
<small class="form-text text-muted">
How long to wait for worker heartbeat before considering it inactive (1-60 minutes). <strong>Default: 5 minutes</strong>
</small>
</div>
<div class="mb-3">
<label for="taskTimeout" class="form-label">Task Timeout (hours)</label>
<input type="number" class="form-control" id="taskTimeout"
value={fmt.Sprintf("%.0f", float64(data.Config.TaskTimeoutSeconds)/3600)}
placeholder="2 (default)" min="1" max="24">
<small class="form-text text-muted">
Maximum time allowed for a single task to complete (1-24 hours). <strong>Default: 2 hours</strong>
</small>
</div>
<div class="mb-3">
<label for="globalMaxConcurrent" class="form-label">Global Concurrent Limit</label>
<input type="number" class="form-control" id="globalMaxConcurrent"
value={fmt.Sprintf("%d", data.Config.Policy.GlobalMaxConcurrent)}
placeholder="4 (default)" min="1" max="20">
<small class="form-text text-muted">
Maximum number of maintenance tasks that can run simultaneously across all workers (1-20). <strong>Default: 4</strong>
</small>
</div>
<div class="mb-3">
<label for="maxRetries" class="form-label">Default Max Retries</label>
<input type="number" class="form-control" id="maxRetries"
value={fmt.Sprintf("%d", data.Config.MaxRetries)}
placeholder="3 (default)" min="0" max="10">
<small class="form-text text-muted">
Default number of times to retry failed tasks (0-10). <strong>Default: 3</strong>
</small>
</div>
<div class="mb-3">
<label for="retryDelay" class="form-label">Retry Delay (minutes)</label>
<input type="number" class="form-control" id="retryDelay"
value={fmt.Sprintf("%.0f", float64(data.Config.RetryDelaySeconds)/60)}
placeholder="15 (default)" min="1" max="120">
<small class="form-text text-muted">
Time to wait before retrying failed tasks (1-120 minutes). <strong>Default: 15 minutes</strong>
</small>
</div>
<div class="mb-3">
<label for="taskRetention" class="form-label">Task Retention (days)</label>
<input type="number" class="form-control" id="taskRetention"
value={fmt.Sprintf("%.0f", float64(data.Config.TaskRetentionSeconds)/(24*3600))}
placeholder="7 (default)" min="1" max="30">
<small class="form-text text-muted">
How long to keep completed/failed task records (1-30 days). <strong>Default: 7 days</strong>
</small>
</div>
<div class="d-flex gap-2">
<button type="button" class="btn btn-primary" onclick="saveConfiguration()">
<i class="fas fa-save me-1"></i>
Save Configuration
</button>
<button type="button" class="btn btn-secondary" onclick="resetToDefaults()">
<i class="fas fa-undo me-1"></i>
Reset to Defaults
</button>
</div>
</form>
</div>
</div>
</div>
</div>
<!-- Individual Task Configuration Menu -->
<div class="row mt-4">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-cogs me-2"></i>
Task Configuration
</h5>
</div>
<div class="card-body">
<p class="text-muted mb-3">Configure specific settings for each maintenance task type.</p>
<div class="list-group">
for _, menuItem := range data.MenuItems {
<a href={templ.SafeURL(menuItem.Path)} class="list-group-item list-group-item-action">
<div class="d-flex w-100 justify-content-between">
<h6 class="mb-1">
<i class={menuItem.Icon + " me-2"}></i>
{menuItem.DisplayName}
</h6>
if menuItem.IsEnabled {
<span class="badge bg-success">Enabled</span>
} else {
<span class="badge bg-secondary">Disabled</span>
}
</div>
<p class="mb-1 small text-muted">{menuItem.Description}</p>
</a>
}
</div>
</div>
</div>
</div>
</div>
<!-- Statistics Overview -->
<div class="row mt-4">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">System Statistics</h5>
</div>
<div class="card-body">
<div class="row">
<div class="col-md-3">
<div class="text-center">
<h6 class="text-muted">Last Scan</h6>
<p class="mb-0">{data.LastScanTime.Format("2006-01-02 15:04:05")}</p>
</div>
</div>
<div class="col-md-3">
<div class="text-center">
<h6 class="text-muted">Next Scan</h6>
<p class="mb-0">{data.NextScanTime.Format("2006-01-02 15:04:05")}</p>
</div>
</div>
<div class="col-md-3">
<div class="text-center">
<h6 class="text-muted">Total Tasks</h6>
<p class="mb-0">{fmt.Sprintf("%d", data.SystemStats.TotalTasks)}</p>
</div>
</div>
<div class="col-md-3">
<div class="text-center">
<h6 class="text-muted">Active Workers</h6>
<p class="mb-0">{fmt.Sprintf("%d", data.SystemStats.ActiveWorkers)}</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<script>
function saveConfiguration() {
// First, get current configuration to preserve existing values
fetch('/api/maintenance/config')
.then(response => response.json())
.then(currentConfig => {
// Update only the fields from the form
const updatedConfig = {
...currentConfig.config, // Preserve existing config
enabled: document.getElementById('enabled').checked,
scan_interval_seconds: parseInt(document.getElementById('scanInterval').value) * 60, // Convert to seconds
worker_timeout_seconds: parseInt(document.getElementById('workerTimeout').value) * 60, // Convert to seconds
task_timeout_seconds: parseInt(document.getElementById('taskTimeout').value) * 3600, // Convert to seconds
retry_delay_seconds: parseInt(document.getElementById('retryDelay').value) * 60, // Convert to seconds
max_retries: parseInt(document.getElementById('maxRetries').value),
task_retention_seconds: parseInt(document.getElementById('taskRetention').value) * 24 * 3600, // Convert to seconds
policy: {
...currentConfig.config.policy, // Preserve existing policy
global_max_concurrent: parseInt(document.getElementById('globalMaxConcurrent').value)
}
};
// Send the updated configuration
return fetch('/api/maintenance/config', {
method: 'PUT',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(updatedConfig)
});
})
.then(response => response.json())
.then(data => {
if (data.success) {
alert('Configuration saved successfully');
location.reload(); // Reload to show updated values
} else {
alert('Failed to save configuration: ' + (data.error || 'Unknown error'));
}
})
.catch(error => {
alert('Error: ' + error.message);
});
}
function resetToDefaults() {
showConfirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.', function() {
// Reset form to defaults (matching DefaultMaintenanceConfig values)
document.getElementById('enabled').checked = false;
document.getElementById('scanInterval').value = '30';
document.getElementById('workerTimeout').value = '5';
document.getElementById('taskTimeout').value = '2';
document.getElementById('globalMaxConcurrent').value = '4';
document.getElementById('maxRetries').value = '3';
document.getElementById('retryDelay').value = '15';
document.getElementById('taskRetention').value = '7';
});
}
</script>
}

View File

@@ -1,383 +0,0 @@
package app
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
)
// MaintenanceConfigSchema renders the schema-driven maintenance
// configuration page: every schema field is emitted in declaration order
// via the ConfigField component, followed by per-task configuration
// cards. The embedded script serializes the form (folding each
// interval's value+unit pair into seconds) and PUTs it to
// /api/maintenance/config; reset POSTs to /maintenance/config/defaults.
templ MaintenanceConfigSchema(data *maintenance.MaintenanceConfigData, schema *maintenance.MaintenanceConfigSchema) {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h2 class="mb-0">
<i class="fas fa-cogs me-2"></i>
Maintenance Configuration
</h2>
<div class="btn-group">
<a href="/maintenance/tasks" class="btn btn-outline-primary">
<i class="fas fa-tasks me-1"></i>
View Tasks
</a>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">System Settings</h5>
</div>
<div class="card-body">
<form id="maintenanceConfigForm">
<!-- Dynamically render all schema fields in order -->
for _, field := range schema.Fields {
@ConfigField(field, data.Config)
}
<div class="d-flex gap-2">
<button type="button" class="btn btn-primary" onclick="saveConfiguration()">
<i class="fas fa-save me-1"></i>
Save Configuration
</button>
<button type="button" class="btn btn-secondary" onclick="resetToDefaults()">
<i class="fas fa-undo me-1"></i>
Reset to Defaults
</button>
</div>
</form>
</div>
</div>
</div>
</div>
<!-- Task Configuration Cards -->
<div class="row mt-4">
<div class="col-md-4">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-broom me-2"></i>
Volume Vacuum
</h5>
</div>
<div class="card-body">
<p class="card-text">Reclaims disk space by removing deleted files from volumes.</p>
<a href="/maintenance/config/vacuum" class="btn btn-primary">Configure</a>
</div>
</div>
</div>
<div class="col-md-4">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-balance-scale me-2"></i>
Volume Balance
</h5>
</div>
<div class="card-body">
<p class="card-text">Redistributes volumes across servers to optimize storage utilization.</p>
<a href="/maintenance/config/balance" class="btn btn-primary">Configure</a>
</div>
</div>
</div>
<div class="col-md-4">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-shield-alt me-2"></i>
Erasure Coding
</h5>
</div>
<div class="card-body">
<p class="card-text">Converts volumes to erasure coded format for improved durability.</p>
<a href="/maintenance/config/erasure_coding" class="btn btn-primary">Configure</a>
</div>
</div>
</div>
</div>
</div>
<script>
function saveConfiguration() {
const form = document.getElementById('maintenanceConfigForm');
const formData = new FormData(form);
// Convert form data to JSON, handling interval fields specially
const config = {};
for (let [key, value] of formData.entries()) {
if (key.endsWith('_value')) {
// This is an interval value part
const baseKey = key.replace('_value', '');
const unitKey = baseKey + '_unit';
const unitValue = formData.get(unitKey);
if (unitValue) {
// Convert to seconds based on unit
const numValue = parseInt(value) || 0;
let seconds = numValue;
switch(unitValue) {
case 'minutes':
seconds = numValue * 60;
break;
case 'hours':
seconds = numValue * 3600;
break;
case 'days':
seconds = numValue * 24 * 3600;
break;
}
config[baseKey] = seconds;
}
} else if (key.endsWith('_unit')) {
// Skip unit keys - they're handled with their corresponding value
continue;
} else {
// Regular field
if (form.querySelector(`[name="${key}"]`).type === 'checkbox') {
config[key] = form.querySelector(`[name="${key}"]`).checked;
} else {
const numValue = parseFloat(value);
config[key] = isNaN(numValue) ? value : numValue;
}
}
}
fetch('/api/maintenance/config', {
method: 'PUT',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(config)
})
.then(response => {
if (response.status === 401) {
showAlert('Authentication required. Please log in first.', 'warning');
setTimeout(() => {
window.location.href = '/login';
}, 2000);
return;
}
return response.json();
})
.then(data => {
if (!data) return; // Skip if redirected to login
if (data.success) {
showAlert('Configuration saved successfully!', 'success');
location.reload();
} else {
showAlert('Error saving configuration: ' + (data.error || 'Unknown error'), 'error');
}
})
.catch(error => {
console.error('Error:', error);
showAlert('Error saving configuration: ' + error.message, 'error');
});
}
function resetToDefaults() {
showConfirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.', function() {
fetch('/maintenance/config/defaults', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
showAlert('Configuration reset to defaults!', 'success');
location.reload();
} else {
showAlert('Error resetting configuration: ' + (data.error || 'Unknown error'), 'error');
}
})
.catch(error => {
console.error('Error:', error);
showAlert('Error resetting configuration: ' + error.message, 'error');
});
});
}
</script>
}
// ConfigField renders one configuration form control from its schema
// definition, selecting the widget by field.InputType:
//   - "interval": a number input paired with a minutes/hours/days unit
//     selector (submitted as <name>_value and <name>_unit)
//   - "checkbox": a bootstrap switch bound to a bool config value
//   - default:    a plain number input with min/max/step from the schema
//
// Current values are resolved with the typed getMaintenanceInt32Field /
// getMaintenanceBoolField helpers keyed by the field's JSON name.
templ ConfigField(field *config.Field, config *maintenance.MaintenanceConfig) {
if field.InputType == "interval" {
<!-- Interval field with number input + unit dropdown -->
<div class="mb-3">
<label for={ field.JSONName } class="form-label">
{ field.DisplayName }
if field.Required {
<span class="text-danger">*</span>
}
</label>
<div class="input-group">
<input
type="number"
class="form-control"
id={ field.JSONName + "_value" }
name={ field.JSONName + "_value" }
value={ fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getMaintenanceInt32Field(config, field.JSONName))) }
step="1"
min="1"
if field.Required {
required
}
/>
<select
class="form-select"
id={ field.JSONName + "_unit" }
name={ field.JSONName + "_unit" }
style="max-width: 120px;"
if field.Required {
required
}
>
<option
value="minutes"
if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "minutes" {
selected
}
>
Minutes
</option>
<option
value="hours"
if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "hours" {
selected
}
>
Hours
</option>
<option
value="days"
if components.GetInt32DisplayUnit(getMaintenanceInt32Field(config, field.JSONName)) == "days" {
selected
}
>
Days
</option>
</select>
</div>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
} else if field.InputType == "checkbox" {
<!-- Checkbox field -->
<div class="mb-3">
<div class="form-check form-switch">
<input
class="form-check-input"
type="checkbox"
id={ field.JSONName }
name={ field.JSONName }
if getMaintenanceBoolField(config, field.JSONName) {
checked
}
/>
<label class="form-check-label" for={ field.JSONName }>
<strong>{ field.DisplayName }</strong>
</label>
</div>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
} else {
<!-- Number field -->
<div class="mb-3">
<label for={ field.JSONName } class="form-label">
{ field.DisplayName }
if field.Required {
<span class="text-danger">*</span>
}
</label>
<input
type="number"
class="form-control"
id={ field.JSONName }
name={ field.JSONName }
value={ fmt.Sprintf("%d", getMaintenanceInt32Field(config, field.JSONName)) }
placeholder={ field.Placeholder }
if field.MinValue != nil {
min={ fmt.Sprintf("%v", field.MinValue) }
}
if field.MaxValue != nil {
max={ fmt.Sprintf("%v", field.MaxValue) }
}
step={ getNumberStep(field) }
if field.Required {
required
}
/>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
}
}
// Helper functions for form field types.

// getNumberStep returns the HTML "step" attribute for a numeric form
// field: fractional steps for float-typed fields, whole numbers otherwise.
func getNumberStep(field *config.Field) string {
	switch field.Type {
	case config.FieldTypeFloat:
		return "0.01"
	default:
		return "1"
	}
}
// Typed field getters for MaintenanceConfig - no interface{} needed.

// getMaintenanceInt32Field resolves an int32 configuration value by its
// JSON field name. It returns 0 when the config is nil, the field name is
// unknown, or the nested scheduling policy is absent.
func getMaintenanceInt32Field(cfg *maintenance.MaintenanceConfig, fieldName string) int32 {
	if cfg == nil {
		return 0
	}
	switch fieldName {
	case "scan_interval_seconds":
		return cfg.ScanIntervalSeconds
	case "worker_timeout_seconds":
		return cfg.WorkerTimeoutSeconds
	case "task_timeout_seconds":
		return cfg.TaskTimeoutSeconds
	case "retry_delay_seconds":
		return cfg.RetryDelaySeconds
	case "max_retries":
		return cfg.MaxRetries
	case "cleanup_interval_seconds":
		return cfg.CleanupIntervalSeconds
	case "task_retention_seconds":
		return cfg.TaskRetentionSeconds
	case "global_max_concurrent":
		// This value lives on the nested policy, which may be nil.
		if policy := cfg.Policy; policy != nil {
			return policy.GlobalMaxConcurrent
		}
	}
	return 0
}
// getMaintenanceBoolField resolves a bool configuration value by its JSON
// field name; only "enabled" is currently mapped. It returns false for a
// nil config or an unknown field.
func getMaintenanceBoolField(cfg *maintenance.MaintenanceConfig, fieldName string) bool {
	if cfg != nil && fieldName == "enabled" {
		return cfg.Enabled
	}
	return false
}
// schemaToJSON is intended to expose the maintenance config schema to
// client-side JavaScript; it currently emits a literal empty JSON object
// as a placeholder and ignores its argument.
templ schemaToJSON(schema *maintenance.MaintenanceConfigSchema) {
{`{}`}
}

File diff suppressed because one or more lines are too long

View File

@@ -1,284 +0,0 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
package app
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
)
func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\"><i class=\"fas fa-cog me-2\"></i> Maintenance Configuration</h2><div class=\"btn-group\"><a href=\"/maintenance\" class=\"btn btn-outline-secondary\"><i class=\"fas fa-arrow-left me-1\"></i> Back to Queue</a></div></div></div></div><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\">System Settings</h5></div><div class=\"card-body\"><form><div class=\"mb-3\"><div class=\"form-check form-switch\"><input class=\"form-check-input\" type=\"checkbox\" id=\"enabled\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.IsEnabled {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " checked")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "> <label class=\"form-check-label\" for=\"enabled\"><strong>Enable Maintenance System</strong></label></div><small class=\"form-text text-muted\">When enabled, the system will automatically scan for and execute maintenance tasks.</small></div><div class=\"mb-3\"><label for=\"scanInterval\" class=\"form-label\">Scan Interval (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"scanInterval\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.ScanIntervalSeconds)/60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 50, Col: 110}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" placeholder=\"30 (default)\" min=\"1\" max=\"1440\"> <small class=\"form-text text-muted\">How often to scan for maintenance tasks (1-1440 minutes). <strong>Default: 30 minutes</strong></small></div><div class=\"mb-3\"><label for=\"workerTimeout\" class=\"form-label\">Worker Timeout (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"workerTimeout\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.WorkerTimeoutSeconds)/60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 60, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" placeholder=\"5 (default)\" min=\"1\" max=\"60\"> <small class=\"form-text text-muted\">How long to wait for worker heartbeat before considering it inactive (1-60 minutes). <strong>Default: 5 minutes</strong></small></div><div class=\"mb-3\"><label for=\"taskTimeout\" class=\"form-label\">Task Timeout (hours)</label> <input type=\"number\" class=\"form-control\" id=\"taskTimeout\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.TaskTimeoutSeconds)/3600))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 70, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" placeholder=\"2 (default)\" min=\"1\" max=\"24\"> <small class=\"form-text text-muted\">Maximum time allowed for a single task to complete (1-24 hours). <strong>Default: 2 hours</strong></small></div><div class=\"mb-3\"><label for=\"globalMaxConcurrent\" class=\"form-label\">Global Concurrent Limit</label> <input type=\"number\" class=\"form-control\" id=\"globalMaxConcurrent\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Config.Policy.GlobalMaxConcurrent))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 80, Col: 103}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "\" placeholder=\"4 (default)\" min=\"1\" max=\"20\"> <small class=\"form-text text-muted\">Maximum number of maintenance tasks that can run simultaneously across all workers (1-20). <strong>Default: 4</strong></small></div><div class=\"mb-3\"><label for=\"maxRetries\" class=\"form-label\">Default Max Retries</label> <input type=\"number\" class=\"form-control\" id=\"maxRetries\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Config.MaxRetries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 90, Col: 87}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "\" placeholder=\"3 (default)\" min=\"0\" max=\"10\"> <small class=\"form-text text-muted\">Default number of times to retry failed tasks (0-10). <strong>Default: 3</strong></small></div><div class=\"mb-3\"><label for=\"retryDelay\" class=\"form-label\">Retry Delay (minutes)</label> <input type=\"number\" class=\"form-control\" id=\"retryDelay\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.RetryDelaySeconds)/60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 100, Col: 108}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" placeholder=\"15 (default)\" min=\"1\" max=\"120\"> <small class=\"form-text text-muted\">Time to wait before retrying failed tasks (1-120 minutes). <strong>Default: 15 minutes</strong></small></div><div class=\"mb-3\"><label for=\"taskRetention\" class=\"form-label\">Task Retention (days)</label> <input type=\"number\" class=\"form-control\" id=\"taskRetention\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", float64(data.Config.TaskRetentionSeconds)/(24*3600)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 110, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" placeholder=\"7 (default)\" min=\"1\" max=\"30\"> <small class=\"form-text text-muted\">How long to keep completed/failed task records (1-30 days). <strong>Default: 7 days</strong></small></div><div class=\"d-flex gap-2\"><button type=\"button\" class=\"btn btn-primary\" onclick=\"saveConfiguration()\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetToDefaults()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button></div></form></div></div></div></div><!-- Individual Task Configuration Menu --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-cogs me-2\"></i> Task Configuration</h5></div><div class=\"card-body\"><p class=\"text-muted mb-3\">Configure specific settings for each maintenance task type.</p><div class=\"list-group\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, menuItem := range data.MenuItems {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "<a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 templ.SafeURL
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.Path))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 147, Col: 69}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "\" class=\"list-group-item list-group-item-action\"><div class=\"d-flex w-100 justify-content-between\"><h6 class=\"mb-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 = []any{menuItem.Icon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var10...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var10).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.DisplayName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 151, Col: 65}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "</h6>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if menuItem.IsEnabled {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<span class=\"badge bg-success\">Enabled</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<span class=\"badge bg-secondary\">Disabled</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</div><p class=\"mb-1 small text-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 159, Col: 90}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "</p></a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</div></div></div></div></div><!-- Statistics Overview --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\">System Statistics</h5></div><div class=\"card-body\"><div class=\"row\"><div class=\"col-md-3\"><div class=\"text-center\"><h6 class=\"text-muted\">Last Scan</h6><p class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastScanTime.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 180, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</p></div></div><div class=\"col-md-3\"><div class=\"text-center\"><h6 class=\"text-muted\">Next Scan</h6><p class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(data.NextScanTime.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 186, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</p></div></div><div class=\"col-md-3\"><div class=\"text-center\"><h6 class=\"text-muted\">Total Tasks</h6><p class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.SystemStats.TotalTasks))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 192, Col: 99}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</p></div></div><div class=\"col-md-3\"><div class=\"text-center\"><h6 class=\"text-muted\">Active Workers</h6><p class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.SystemStats.ActiveWorkers))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_config.templ`, Line: 198, Col: 102}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</p></div></div></div></div></div></div></div></div><script>\n function saveConfiguration() {\n // First, get current configuration to preserve existing values\n fetch('/api/maintenance/config')\n .then(response => response.json())\n .then(currentConfig => {\n // Update only the fields from the form\n const updatedConfig = {\n ...currentConfig.config, // Preserve existing config\n enabled: document.getElementById('enabled').checked,\n scan_interval_seconds: parseInt(document.getElementById('scanInterval').value) * 60, // Convert to seconds\n worker_timeout_seconds: parseInt(document.getElementById('workerTimeout').value) * 60, // Convert to seconds\n task_timeout_seconds: parseInt(document.getElementById('taskTimeout').value) * 3600, // Convert to seconds\n retry_delay_seconds: parseInt(document.getElementById('retryDelay').value) * 60, // Convert to seconds\n max_retries: parseInt(document.getElementById('maxRetries').value),\n task_retention_seconds: parseInt(document.getElementById('taskRetention').value) * 24 * 3600, // Convert to seconds\n policy: {\n ...currentConfig.config.policy, // Preserve existing policy\n global_max_concurrent: parseInt(document.getElementById('globalMaxConcurrent').value)\n }\n };\n\n // Send the updated configuration\n return fetch('/api/maintenance/config', {\n method: 'PUT',\n headers: {\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(updatedConfig)\n });\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Configuration saved successfully');\n location.reload(); // Reload to show updated values\n } else {\n alert('Failed to save configuration: ' + (data.error || 'Unknown error'));\n }\n })\n .catch(error => {\n alert('Error: ' + error.message);\n });\n }\n\n function resetToDefaults() {\n showConfirm('Are you sure you want to reset to default configuration? 
This will overwrite your current settings.', function() {\n // Reset form to defaults (matching DefaultMaintenanceConfig values)\n document.getElementById('enabled').checked = false;\n document.getElementById('scanInterval').value = '30';\n document.getElementById('workerTimeout').value = '5';\n document.getElementById('taskTimeout').value = '2';\n document.getElementById('globalMaxConcurrent').value = '4';\n document.getElementById('maxRetries').value = '3';\n document.getElementById('retryDelay').value = '15';\n document.getElementById('taskRetention').value = '7';\n });\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
// Blank reference to templruntime.GeneratedTemplate — standard trailer
// emitted by the templ compiler so the runtime import is always used.
var _ = templruntime.GeneratedTemplate

View File

@@ -1,405 +0,0 @@
package app
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
)
// MaintenanceQueue renders the maintenance queue page: four summary
// statistic cards (pending / running / completed today / failed today)
// followed by three task tables — completed, pending, and active — all
// filtered from the single flat data.Tasks list by task.Status. The page
// also emits inline scripts wiring the Trigger Scan button
// (POST /api/maintenance/scan), the Refresh button, and click-through
// navigation from any row to /maintenance/tasks/{id}.
templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) {
<div class="container-fluid">
<!-- Header -->
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h2 class="mb-0">
<i class="fas fa-tasks me-2"></i>
Maintenance Queue
</h2>
<div class="btn-group">
<button type="button" class="btn btn-primary" onclick="triggerScan()">
<i class="fas fa-search me-1"></i>
Trigger Scan
</button>
<button type="button" class="btn btn-secondary" onclick="refreshPage()">
<i class="fas fa-sync-alt me-1"></i>
Refresh
</button>
</div>
</div>
</div>
</div>
<!-- Statistics Cards -->
<div class="row mb-4">
<div class="col-md-3">
<div class="card border-primary">
<div class="card-body text-center">
<i class="fas fa-clock fa-2x text-primary mb-2"></i>
<h4 class="mb-1">{fmt.Sprintf("%d", data.Stats.PendingTasks)}</h4>
<p class="text-muted mb-0">Pending Tasks</p>
</div>
</div>
</div>
<div class="col-md-3">
<div class="card border-warning">
<div class="card-body text-center">
<i class="fas fa-running fa-2x text-warning mb-2"></i>
<h4 class="mb-1">{fmt.Sprintf("%d", data.Stats.RunningTasks)}</h4>
<p class="text-muted mb-0">Running Tasks</p>
</div>
</div>
</div>
<div class="col-md-3">
<div class="card border-success">
<div class="card-body text-center">
<i class="fas fa-check-circle fa-2x text-success mb-2"></i>
<h4 class="mb-1">{fmt.Sprintf("%d", data.Stats.CompletedToday)}</h4>
<p class="text-muted mb-0">Completed Today</p>
</div>
</div>
</div>
<div class="col-md-3">
<div class="card border-danger">
<div class="card-body text-center">
<i class="fas fa-exclamation-triangle fa-2x text-danger mb-2"></i>
<h4 class="mb-1">{fmt.Sprintf("%d", data.Stats.FailedToday)}</h4>
<p class="text-muted mb-0">Failed Today</p>
</div>
</div>
</div>
</div>
<!-- Completed Tasks -->
<div class="row mb-4">
<div class="col-12">
<div class="card">
<div class="card-header bg-success text-white">
<h5 class="mb-0">
<i class="fas fa-check-circle me-2"></i>
Completed Tasks
</h5>
</div>
<div class="card-body">
// Empty-state message when today saw no completions or failures.
if data.Stats.CompletedToday == 0 && data.Stats.FailedToday == 0 {
<div class="text-center text-muted py-4">
<i class="fas fa-check-circle fa-3x mb-3"></i>
<p>No completed maintenance tasks today</p>
<small>Completed tasks will appear here after workers finish processing them</small>
</div>
} else {
<div class="table-responsive">
<table class="table table-hover">
<thead>
<tr>
<th>Type</th>
<th>Status</th>
<th>Volume</th>
<th>Worker</th>
<th>Duration</th>
<th>Completed</th>
</tr>
</thead>
<tbody>
// Only tasks in a terminal state belong in this table.
for _, task := range data.Tasks {
if string(task.Status) == "completed" || string(task.Status) == "failed" || string(task.Status) == "cancelled" {
// Failed rows get the red table-danger highlight; the cell markup
// is otherwise identical between the two branches below.
if string(task.Status) == "failed" {
<tr class="table-danger clickable-row" data-task-id={task.ID} onclick="navigateToTask(this)" style="cursor: pointer;">
<td>
@TaskTypeIcon(task.Type)
{string(task.Type)}
</td>
<td>@StatusBadge(task.Status)</td>
<td>{fmt.Sprintf("%d", task.VolumeID)}</td>
<td>
if task.WorkerID != "" {
<small>{task.WorkerID}</small>
} else {
<span class="text-muted">-</span>
}
</td>
<td>
if task.StartedAt != nil && task.CompletedAt != nil {
{formatDuration(task.CompletedAt.Sub(*task.StartedAt))}
} else {
<span class="text-muted">-</span>
}
</td>
<td>
if task.CompletedAt != nil {
{task.CompletedAt.Format("2006-01-02 15:04")}
} else {
<span class="text-muted">-</span>
}
</td>
</tr>
} else {
<tr class="clickable-row" data-task-id={task.ID} onclick="navigateToTask(this)" style="cursor: pointer;">
<td>
@TaskTypeIcon(task.Type)
{string(task.Type)}
</td>
<td>@StatusBadge(task.Status)</td>
<td>{fmt.Sprintf("%d", task.VolumeID)}</td>
<td>
if task.WorkerID != "" {
<small>{task.WorkerID}</small>
} else {
<span class="text-muted">-</span>
}
</td>
<td>
if task.StartedAt != nil && task.CompletedAt != nil {
{formatDuration(task.CompletedAt.Sub(*task.StartedAt))}
} else {
<span class="text-muted">-</span>
}
</td>
<td>
if task.CompletedAt != nil {
{task.CompletedAt.Format("2006-01-02 15:04")}
} else {
<span class="text-muted">-</span>
}
</td>
</tr>
}
}
}
</tbody>
</table>
</div>
}
</div>
</div>
</div>
</div>
<!-- Pending Tasks -->
<div class="row mb-4">
<div class="col-12">
<div class="card">
<div class="card-header bg-primary text-white">
<h5 class="mb-0">
<i class="fas fa-clock me-2"></i>
Pending Tasks
</h5>
</div>
<div class="card-body">
if data.Stats.PendingTasks == 0 {
<div class="text-center text-muted py-4">
<i class="fas fa-clipboard-list fa-3x mb-3"></i>
<p>No pending maintenance tasks</p>
<small>Pending tasks will appear here when the system detects maintenance needs</small>
</div>
} else {
<div class="table-responsive">
<table class="table table-hover">
<thead>
<tr>
<th>Type</th>
<th>Priority</th>
<th>Volume</th>
<th>Server</th>
<th>Reason</th>
<th>Created</th>
</tr>
</thead>
<tbody>
// Pending tasks have no worker yet, so show priority/server/reason instead.
for _, task := range data.Tasks {
if string(task.Status) == "pending" {
<tr class="clickable-row" data-task-id={task.ID} onclick="navigateToTask(this)" style="cursor: pointer;">
<td>
@TaskTypeIcon(task.Type)
{string(task.Type)}
</td>
<td>@PriorityBadge(task.Priority)</td>
<td>{fmt.Sprintf("%d", task.VolumeID)}</td>
<td><small>{task.Server}</small></td>
<td><small>{task.Reason}</small></td>
<td>{task.CreatedAt.Format("2006-01-02 15:04")}</td>
</tr>
}
}
</tbody>
</table>
</div>
}
</div>
</div>
</div>
</div>
<!-- Active Tasks -->
<div class="row mb-4">
<div class="col-12">
<div class="card">
<div class="card-header bg-warning text-dark">
<h5 class="mb-0">
<i class="fas fa-running me-2"></i>
Active Tasks
</h5>
</div>
<div class="card-body">
if data.Stats.RunningTasks == 0 {
<div class="text-center text-muted py-4">
<i class="fas fa-tasks fa-3x mb-3"></i>
<p>No active maintenance tasks</p>
<small>Active tasks will appear here when workers start processing them</small>
</div>
} else {
<div class="table-responsive">
<table class="table table-hover">
<thead>
<tr>
<th>Type</th>
<th>Status</th>
<th>Progress</th>
<th>Volume</th>
<th>Worker</th>
<th>Started</th>
</tr>
</thead>
<tbody>
// Both "assigned" and "in_progress" count as active here.
for _, task := range data.Tasks {
if string(task.Status) == "assigned" || string(task.Status) == "in_progress" {
<tr class="clickable-row" data-task-id={task.ID} onclick="navigateToTask(this)" style="cursor: pointer;">
<td>
@TaskTypeIcon(task.Type)
{string(task.Type)}
</td>
<td>@StatusBadge(task.Status)</td>
<td>@ProgressBar(task.Progress, task.Status)</td>
<td>{fmt.Sprintf("%d", task.VolumeID)}</td>
<td>
if task.WorkerID != "" {
<small>{task.WorkerID}</small>
} else {
<span class="text-muted">-</span>
}
</td>
<td>
if task.StartedAt != nil {
{task.StartedAt.Format("2006-01-02 15:04")}
} else {
<span class="text-muted">-</span>
}
</td>
</tr>
}
}
</tbody>
</table>
</div>
}
</div>
</div>
</div>
</div>
</div>
<script>
window.triggerScan = function() {
console.log("triggerScan called");
fetch('/api/maintenance/scan', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
showToast('Success', 'Maintenance scan triggered successfully', 'success');
setTimeout(() => window.location.reload(), 2000);
} else {
showToast('Error', 'Failed to trigger scan: ' + (data.error || 'Unknown error'), 'danger');
}
})
.catch(error => {
showToast('Error', 'Error: ' + error.message, 'danger');
});
};
window.refreshPage = function() {
console.log("refreshPage called");
window.location.reload();
};
window.navigateToTask = function(element) {
const taskId = element.getAttribute('data-task-id');
if (taskId) {
window.location.href = '/maintenance/tasks/' + taskId;
}
};
</script>
}
// Helper components
// TaskTypeIcon renders the Font Awesome icon for the given maintenance
// task type, using maintenance.GetTaskIcon to resolve the icon class.
templ TaskTypeIcon(taskType maintenance.MaintenanceTaskType) {
<i class={maintenance.GetTaskIcon(taskType) + " me-1"}></i>
}
// PriorityBadge renders a Bootstrap badge colored by task priority:
// critical=red, high=yellow, normal=blue, low=gray; any other value
// falls through to a light "Unknown" badge.
templ PriorityBadge(priority maintenance.MaintenanceTaskPriority) {
switch priority {
case maintenance.PriorityCritical:
<span class="badge bg-danger">Critical</span>
case maintenance.PriorityHigh:
<span class="badge bg-warning">High</span>
case maintenance.PriorityNormal:
<span class="badge bg-primary">Normal</span>
case maintenance.PriorityLow:
<span class="badge bg-secondary">Low</span>
default:
<span class="badge bg-light text-dark">Unknown</span>
}
}
// StatusBadge renders a Bootstrap badge colored by task status. Note the
// in-progress status is labeled "Running" in the UI; unrecognized
// statuses fall through to a light "Unknown" badge.
templ StatusBadge(status maintenance.MaintenanceTaskStatus) {
switch status {
case maintenance.TaskStatusPending:
<span class="badge bg-secondary">Pending</span>
case maintenance.TaskStatusAssigned:
<span class="badge bg-info">Assigned</span>
case maintenance.TaskStatusInProgress:
<span class="badge bg-warning">Running</span>
case maintenance.TaskStatusCompleted:
<span class="badge bg-success">Completed</span>
case maintenance.TaskStatusFailed:
<span class="badge bg-danger">Failed</span>
case maintenance.TaskStatusCancelled:
<span class="badge bg-light text-dark">Cancelled</span>
default:
<span class="badge bg-light text-dark">Unknown</span>
}
}
// ProgressBar renders a live progress bar for assigned/in-progress tasks
// (progress is a 0-100 percentage used directly as the bar width), a full
// green 100% bar for completed tasks, and a plain dash for every other
// status.
templ ProgressBar(progress float64, status maintenance.MaintenanceTaskStatus) {
if status == maintenance.TaskStatusInProgress || status == maintenance.TaskStatusAssigned {
<div class="progress" style="height: 8px; min-width: 100px;">
<div class="progress-bar" role="progressbar" style={fmt.Sprintf("width: %.1f%%", progress)}>
</div>
</div>
<small class="text-muted">{fmt.Sprintf("%.1f%%", progress)}</small>
} else if status == maintenance.TaskStatusCompleted {
<div class="progress" style="height: 8px; min-width: 100px;">
<div class="progress-bar bg-success" role="progressbar" style="width: 100%">
</div>
</div>
<small class="text-success">100%</small>
} else {
<span class="text-muted">-</span>
}
}
// formatDuration renders d as a short human-readable string: whole
// seconds below one minute ("42s"), tenths of minutes below one hour
// ("1.5m"), and tenths of hours otherwise ("2.0h").
func formatDuration(d time.Duration) string {
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%.0fs", d.Seconds())
	case d < time.Hour:
		return fmt.Sprintf("%.1fm", d.Minutes())
	default:
		return fmt.Sprintf("%.1fh", d.Hours())
	}
}

View File

@@ -1,860 +0,0 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
package app
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"time"
)
func MaintenanceQueue(data *maintenance.MaintenanceQueueData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><!-- Header --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\"><i class=\"fas fa-tasks me-2\"></i> Maintenance Queue</h2><div class=\"btn-group\"><button type=\"button\" class=\"btn btn-primary\" onclick=\"triggerScan()\"><i class=\"fas fa-search me-1\"></i> Trigger Scan</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"refreshPage()\"><i class=\"fas fa-sync-alt me-1\"></i> Refresh</button></div></div></div></div><!-- Statistics Cards --><div class=\"row mb-4\"><div class=\"col-md-3\"><div class=\"card border-primary\"><div class=\"card-body text-center\"><i class=\"fas fa-clock fa-2x text-primary mb-2\"></i><h4 class=\"mb-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.PendingTasks))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 39, Col: 84}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "</h4><p class=\"text-muted mb-0\">Pending Tasks</p></div></div></div><div class=\"col-md-3\"><div class=\"card border-warning\"><div class=\"card-body text-center\"><i class=\"fas fa-running fa-2x text-warning mb-2\"></i><h4 class=\"mb-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.RunningTasks))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 48, Col: 84}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h4><p class=\"text-muted mb-0\">Running Tasks</p></div></div></div><div class=\"col-md-3\"><div class=\"card border-success\"><div class=\"card-body text-center\"><i class=\"fas fa-check-circle fa-2x text-success mb-2\"></i><h4 class=\"mb-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.CompletedToday))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 57, Col: 86}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</h4><p class=\"text-muted mb-0\">Completed Today</p></div></div></div><div class=\"col-md-3\"><div class=\"card border-danger\"><div class=\"card-body text-center\"><i class=\"fas fa-exclamation-triangle fa-2x text-danger mb-2\"></i><h4 class=\"mb-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.FailedToday))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 66, Col: 83}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</h4><p class=\"text-muted mb-0\">Failed Today</p></div></div></div></div><!-- Completed Tasks --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header bg-success text-white\"><h5 class=\"mb-0\"><i class=\"fas fa-check-circle me-2\"></i> Completed Tasks</h5></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.Stats.CompletedToday == 0 && data.Stats.FailedToday == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-check-circle fa-3x mb-3\"></i><p>No completed maintenance tasks today</p><small>Completed tasks will appear here after workers finish processing them</small></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>Type</th><th>Status</th><th>Volume</th><th>Worker</th><th>Duration</th><th>Completed</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, task := range data.Tasks {
if string(task.Status) == "completed" || string(task.Status) == "failed" || string(task.Status) == "cancelled" {
if string(task.Status) == "failed" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<tr class=\"table-danger clickable-row\" data-task-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 107, Col: 112}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" onclick=\"navigateToTask(this)\" style=\"cursor: pointer;\"><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 110, Col: 78}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 113, Col: 93}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.WorkerID != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "<small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 116, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.StartedAt != nil && task.CompletedAt != nil {
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(formatDuration(task.CompletedAt.Sub(*task.StartedAt)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 123, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.CompletedAt != nil {
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(task.CompletedAt.Format("2006-01-02 15:04"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 130, Col: 108}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "<tr class=\"clickable-row\" data-task-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 137, Col: 99}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\" onclick=\"navigateToTask(this)\" style=\"cursor: pointer;\"><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 140, Col: 78}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 143, Col: 93}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.WorkerID != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 146, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.StartedAt != nil && task.CompletedAt != nil {
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(formatDuration(task.CompletedAt.Sub(*task.StartedAt)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 153, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.CompletedAt != nil {
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(task.CompletedAt.Format("2006-01-02 15:04"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 160, Col: 108}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</tbody></table></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</div></div></div></div><!-- Pending Tasks --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header bg-primary text-white\"><h5 class=\"mb-0\"><i class=\"fas fa-clock me-2\"></i> Pending Tasks</h5></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.Stats.PendingTasks == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-clipboard-list fa-3x mb-3\"></i><p>No pending maintenance tasks</p><small>Pending tasks will appear here when the system detects maintenance needs</small></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>Type</th><th>Priority</th><th>Volume</th><th>Server</th><th>Reason</th><th>Created</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, task := range data.Tasks {
if string(task.Status) == "pending" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<tr class=\"clickable-row\" data-task-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 211, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "\" onclick=\"navigateToTask(this)\" style=\"cursor: pointer;\"><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 214, Col: 74}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = PriorityBadge(task.Priority).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 217, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "</td><td><small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(task.Server)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 218, Col: 75}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</small></td><td><small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(task.Reason)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 219, Col: 75}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</small></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(task.CreatedAt.Format("2006-01-02 15:04"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 220, Col: 98}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "</tbody></table></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</div></div></div></div><!-- Active Tasks --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header bg-warning text-dark\"><h5 class=\"mb-0\"><i class=\"fas fa-running me-2\"></i> Active Tasks</h5></div><div class=\"card-body\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if data.Stats.RunningTasks == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<div class=\"text-center text-muted py-4\"><i class=\"fas fa-tasks fa-3x mb-3\"></i><p>No active maintenance tasks</p><small>Active tasks will appear here when workers start processing them</small></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<div class=\"table-responsive\"><table class=\"table table-hover\"><thead><tr><th>Type</th><th>Status</th><th>Progress</th><th>Volume</th><th>Worker</th><th>Started</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, task := range data.Tasks {
if string(task.Status) == "assigned" || string(task.Status) == "in_progress" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<tr class=\"clickable-row\" data-task-id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(task.ID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 266, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "\" onclick=\"navigateToTask(this)\" style=\"cursor: pointer;\"><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 269, Col: 74}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = ProgressBar(task.Progress, task.Status).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 273, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.WorkerID != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "<small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 276, Col: 81}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if task.StartedAt != nil {
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(task.StartedAt.Format("2006-01-02 15:04"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 283, Col: 102}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "</tbody></table></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "</div></div></div></div></div><script>\n window.triggerScan = function() {\n console.log(\"triggerScan called\");\n fetch('/api/maintenance/scan', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n }\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n showToast('Success', 'Maintenance scan triggered successfully', 'success');\n setTimeout(() => window.location.reload(), 2000);\n } else {\n showToast('Error', 'Failed to trigger scan: ' + (data.error || 'Unknown error'), 'danger');\n }\n })\n .catch(error => {\n showToast('Error', 'Error: ' + error.message, 'danger');\n });\n };\n\n window.refreshPage = function() {\n console.log(\"refreshPage called\");\n window.location.reload();\n };\n\n window.navigateToTask = function(element) {\n const taskId = element.getAttribute('data-task-id');\n if (taskId) {\n window.location.href = '/maintenance/tasks/' + taskId;\n }\n };\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
// Helper components
// TaskTypeIcon renders a Font Awesome <i> element whose CSS classes come
// from maintenance.GetTaskIcon for the given task type, plus an "me-1"
// spacing class. Generated-template structure (buffer management, write
// indices 64/65) is preserved exactly.
func TaskTypeIcon(taskType maintenance.MaintenanceTaskType) templ.Component {
	return templruntime.GeneratedTemplate(func(input templruntime.GeneratedComponentInput) (renderErr error) {
		writer, ctx := input.Writer, input.Context
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		buffer, isBuffer := templruntime.GetBuffer(writer)
		if !isBuffer {
			// This call allocated the buffer, so release it on exit and
			// surface any flush error if rendering itself succeeded.
			defer func() {
				if releaseErr := templruntime.ReleaseBuffer(buffer); renderErr == nil {
					renderErr = releaseErr
				}
			}()
		}
		ctx = templ.InitializeContext(ctx)
		children := templ.GetChildren(ctx)
		if children == nil {
			children = templ.NopComponent
		}
		ctx = templ.ClearChildren(ctx)
		iconClasses := []any{maintenance.GetTaskIcon(taskType) + " me-1"}
		if renderErr = templ.RenderCSSItems(ctx, buffer, iconClasses...); renderErr != nil {
			return renderErr
		}
		if renderErr = templruntime.WriteString(buffer, 64, "<i class=\""); renderErr != nil {
			return renderErr
		}
		classAttr, joinErr := templ.JoinStringErrs(templ.CSSClasses(iconClasses).String())
		if joinErr != nil {
			return templ.Error{Err: joinErr, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 1, Col: 0}
		}
		if _, renderErr = buffer.WriteString(templ.EscapeString(classAttr)); renderErr != nil {
			return renderErr
		}
		if renderErr = templruntime.WriteString(buffer, 65, "\"></i>"); renderErr != nil {
			return renderErr
		}
		return nil
	})
}
// PriorityBadge renders a Bootstrap badge describing a maintenance task
// priority; any unrecognized priority falls back to a neutral "Unknown"
// badge, matching the original switch's default branch.
func PriorityBadge(priority maintenance.MaintenanceTaskPriority) templ.Component {
	return templruntime.GeneratedTemplate(func(input templruntime.GeneratedComponentInput) (renderErr error) {
		writer, ctx := input.Writer, input.Context
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		buffer, isBuffer := templruntime.GetBuffer(writer)
		if !isBuffer {
			// This call allocated the buffer; release it and surface any
			// flush error if rendering otherwise succeeded.
			defer func() {
				if releaseErr := templruntime.ReleaseBuffer(buffer); renderErr == nil {
					renderErr = releaseErr
				}
			}()
		}
		ctx = templ.InitializeContext(ctx)
		children := templ.GetChildren(ctx)
		if children == nil {
			children = templ.NopComponent
		}
		ctx = templ.ClearChildren(ctx)
		// Select the write index and badge markup for this priority; each
		// (index, markup) pair is byte-identical to the original branch.
		index, markup := 70, "<span class=\"badge bg-light text-dark\">Unknown</span>"
		switch priority {
		case maintenance.PriorityCritical:
			index, markup = 66, "<span class=\"badge bg-danger\">Critical</span>"
		case maintenance.PriorityHigh:
			index, markup = 67, "<span class=\"badge bg-warning\">High</span>"
		case maintenance.PriorityNormal:
			index, markup = 68, "<span class=\"badge bg-primary\">Normal</span>"
		case maintenance.PriorityLow:
			index, markup = 69, "<span class=\"badge bg-secondary\">Low</span>"
		}
		return templruntime.WriteString(buffer, index, markup)
	})
}
// StatusBadge renders a Bootstrap badge describing a maintenance task
// status; any unrecognized status falls back to a neutral "Unknown" badge,
// matching the original switch's default branch.
func StatusBadge(status maintenance.MaintenanceTaskStatus) templ.Component {
	return templruntime.GeneratedTemplate(func(input templruntime.GeneratedComponentInput) (renderErr error) {
		writer, ctx := input.Writer, input.Context
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		buffer, isBuffer := templruntime.GetBuffer(writer)
		if !isBuffer {
			// This call allocated the buffer; release it and surface any
			// flush error if rendering otherwise succeeded.
			defer func() {
				if releaseErr := templruntime.ReleaseBuffer(buffer); renderErr == nil {
					renderErr = releaseErr
				}
			}()
		}
		ctx = templ.InitializeContext(ctx)
		children := templ.GetChildren(ctx)
		if children == nil {
			children = templ.NopComponent
		}
		ctx = templ.ClearChildren(ctx)
		// Select the write index and badge markup for this status; each
		// (index, markup) pair is byte-identical to the original branch.
		index, markup := 77, "<span class=\"badge bg-light text-dark\">Unknown</span>"
		switch status {
		case maintenance.TaskStatusPending:
			index, markup = 71, "<span class=\"badge bg-secondary\">Pending</span>"
		case maintenance.TaskStatusAssigned:
			index, markup = 72, "<span class=\"badge bg-info\">Assigned</span>"
		case maintenance.TaskStatusInProgress:
			index, markup = 73, "<span class=\"badge bg-warning\">Running</span>"
		case maintenance.TaskStatusCompleted:
			index, markup = 74, "<span class=\"badge bg-success\">Completed</span>"
		case maintenance.TaskStatusFailed:
			index, markup = 75, "<span class=\"badge bg-danger\">Failed</span>"
		case maintenance.TaskStatusCancelled:
			index, markup = 76, "<span class=\"badge bg-light text-dark\">Cancelled</span>"
		}
		return templruntime.WriteString(buffer, index, markup)
	})
}
// ProgressBar renders a task progress indicator: a live percentage bar for
// assigned/in-progress tasks, a full green bar for completed tasks, and a
// muted dash for every other status. All markup, write indices, and error
// locations match the generated original exactly.
func ProgressBar(progress float64, status maintenance.MaintenanceTaskStatus) templ.Component {
	return templruntime.GeneratedTemplate(func(input templruntime.GeneratedComponentInput) (renderErr error) {
		writer, ctx := input.Writer, input.Context
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		buffer, isBuffer := templruntime.GetBuffer(writer)
		if !isBuffer {
			// This call allocated the buffer; release it and surface any
			// flush error if rendering otherwise succeeded.
			defer func() {
				if releaseErr := templruntime.ReleaseBuffer(buffer); renderErr == nil {
					renderErr = releaseErr
				}
			}()
		}
		ctx = templ.InitializeContext(ctx)
		children := templ.GetChildren(ctx)
		if children == nil {
			children = templ.NopComponent
		}
		ctx = templ.ClearChildren(ctx)
		switch {
		case status == maintenance.TaskStatusInProgress || status == maintenance.TaskStatusAssigned:
			if renderErr = templruntime.WriteString(buffer, 78, "<div class=\"progress\" style=\"height: 8px; min-width: 100px;\"><div class=\"progress-bar\" role=\"progressbar\" style=\""); renderErr != nil {
				return renderErr
			}
			// The width style is sanitized before landing in a style attribute.
			widthStyle, styleErr := templruntime.SanitizeStyleAttributeValues(fmt.Sprintf("width: %.1f%%", progress))
			if styleErr != nil {
				return templ.Error{Err: styleErr, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 380, Col: 102}
			}
			if _, renderErr = buffer.WriteString(templ.EscapeString(widthStyle)); renderErr != nil {
				return renderErr
			}
			if renderErr = templruntime.WriteString(buffer, 79, "\"></div></div><small class=\"text-muted\">"); renderErr != nil {
				return renderErr
			}
			percentText, joinErr := templ.JoinStringErrs(fmt.Sprintf("%.1f%%", progress))
			if joinErr != nil {
				return templ.Error{Err: joinErr, FileName: `weed/admin/view/app/maintenance_queue.templ`, Line: 383, Col: 66}
			}
			if _, renderErr = buffer.WriteString(templ.EscapeString(percentText)); renderErr != nil {
				return renderErr
			}
			if renderErr = templruntime.WriteString(buffer, 80, "</small>"); renderErr != nil {
				return renderErr
			}
		case status == maintenance.TaskStatusCompleted:
			if renderErr = templruntime.WriteString(buffer, 81, "<div class=\"progress\" style=\"height: 8px; min-width: 100px;\"><div class=\"progress-bar bg-success\" role=\"progressbar\" style=\"width: 100%\"></div></div><small class=\"text-success\">100%</small>"); renderErr != nil {
				return renderErr
			}
		default:
			if renderErr = templruntime.WriteString(buffer, 82, "<span class=\"text-muted\">-</span>"); renderErr != nil {
				return renderErr
			}
		}
		return nil
	})
}
func formatDuration(d time.Duration) string {
if d < time.Minute {
return fmt.Sprintf("%.0fs", d.Seconds())
} else if d < time.Hour {
return fmt.Sprintf("%.1fm", d.Minutes())
} else {
return fmt.Sprintf("%.1fh", d.Hours())
}
}
var _ = templruntime.GeneratedTemplate

View File

@@ -1,343 +0,0 @@
package app
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"time"
)
templ MaintenanceWorkers(data *dash.MaintenanceWorkersData) {
<div class="container-fluid">
<div class="row">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center mb-4">
<div>
<h1 class="h3 mb-0 text-gray-800">Maintenance Workers</h1>
<p class="text-muted">Monitor and manage maintenance workers</p>
</div>
<div class="text-end">
<small class="text-muted">Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") }</small>
</div>
</div>
</div>
</div>
<!-- Summary Cards -->
<div class="row mb-4">
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-primary shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-primary text-uppercase mb-1">
Total Workers
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">{ fmt.Sprintf("%d", len(data.Workers)) }</div>
</div>
<div class="col-auto">
<i class="fas fa-users fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-success shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-success text-uppercase mb-1">
Active Workers
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{ fmt.Sprintf("%d", data.ActiveWorkers) }
</div>
</div>
<div class="col-auto">
<i class="fas fa-check-circle fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-info shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-info text-uppercase mb-1">
Busy Workers
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{ fmt.Sprintf("%d", data.BusyWorkers) }
</div>
</div>
<div class="col-auto">
<i class="fas fa-spinner fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-warning shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-warning text-uppercase mb-1">
Total Load
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{ fmt.Sprintf("%d", data.TotalLoad) }
</div>
</div>
<div class="col-auto">
<i class="fas fa-tasks fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Workers Table -->
<div class="row">
<div class="col-12">
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">Worker Details</h6>
</div>
<div class="card-body">
if len(data.Workers) == 0 {
<div class="text-center py-4">
<i class="fas fa-users fa-3x text-gray-300 mb-3"></i>
<h5 class="text-gray-600">No Workers Found</h5>
<p class="text-muted">No maintenance workers are currently registered.</p>
<div class="alert alert-info mt-3">
<strong>Tip:</strong> To start a worker, run:
<br><code>weed worker -admin=&lt;admin_server&gt; -capabilities=vacuum,ec,balance</code>
</div>
</div>
} else {
<div class="table-responsive">
<table class="table table-bordered table-hover" id="workersTable">
<thead class="table-light">
<tr>
<th>Worker ID</th>
<th>Address</th>
<th>Status</th>
<th>Capabilities</th>
<th>Load</th>
<th>Current Tasks</th>
<th>Performance</th>
<th>Last Heartbeat</th>
<th>Actions</th>
</tr>
</thead>
<tbody>
for _, worker := range data.Workers {
<tr>
<td>
<code>{ worker.Worker.ID }</code>
</td>
<td>
<code>{ worker.Worker.Address }</code>
</td>
<td>
if worker.Worker.Status == "active" {
<span class="badge bg-success">Active</span>
} else if worker.Worker.Status == "busy" {
<span class="badge bg-warning">Busy</span>
} else {
<span class="badge bg-danger">Inactive</span>
}
</td>
<td>
<div class="d-flex flex-wrap gap-1">
for _, capability := range worker.Worker.Capabilities {
<span class="badge bg-secondary rounded-pill">{ string(capability) }</span>
}
</div>
</td>
<td>
<div class="progress" style="height: 20px;">
if worker.Worker.MaxConcurrent > 0 {
<div class="progress-bar" role="progressbar"
style={ fmt.Sprintf("width: %d%%", (worker.Worker.CurrentLoad*100)/worker.Worker.MaxConcurrent) }
aria-valuenow={ fmt.Sprintf("%d", worker.Worker.CurrentLoad) }
aria-valuemin="0"
aria-valuemax={ fmt.Sprintf("%d", worker.Worker.MaxConcurrent) }>
{ fmt.Sprintf("%d/%d", worker.Worker.CurrentLoad, worker.Worker.MaxConcurrent) }
</div>
} else {
<div class="progress-bar" role="progressbar" style="width: 0%">0/0</div>
}
</div>
</td>
<td>
{ fmt.Sprintf("%d", len(worker.CurrentTasks)) }
</td>
<td>
<small>
<div>Completed: { fmt.Sprintf("%d", worker.Performance.TasksCompleted) }</div>
<div>Failed: { fmt.Sprintf("%d", worker.Performance.TasksFailed) }</div>
<div>Success Rate: { fmt.Sprintf("%.1f%%", worker.Performance.SuccessRate) }</div>
</small>
</td>
<td>
if time.Since(worker.Worker.LastHeartbeat) < 2*time.Minute {
<span class="text-success">
<i class="fas fa-heartbeat"></i>
{ worker.Worker.LastHeartbeat.Format("15:04:05") }
</span>
} else {
<span class="text-danger">
<i class="fas fa-exclamation-triangle"></i>
{ worker.Worker.LastHeartbeat.Format("15:04:05") }
</span>
}
</td>
<td>
<div class="btn-group btn-group-sm" role="group">
<button type="button" class="btn btn-outline-info" onclick="showWorkerDetails(event)" data-worker-id={ worker.Worker.ID }>
<i class="fas fa-info-circle"></i>
</button>
if worker.Worker.Status == "active" {
<button type="button" class="btn btn-outline-warning" onclick="pauseWorker(event)" data-worker-id={ worker.Worker.ID }>
<i class="fas fa-pause"></i>
</button>
}
</div>
</td>
</tr>
}
</tbody>
</table>
</div>
}
</div>
</div>
</div>
</div>
</div>
<!-- Worker Details Modal -->
<div class="modal fade" id="workerDetailsModal" tabindex="-1" aria-labelledby="workerDetailsModalLabel" aria-hidden="true">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="workerDetailsModalLabel">Worker Details</h5>
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
</div>
<div class="modal-body" id="workerDetailsContent">
<!-- Content will be loaded dynamically -->
</div>
</div>
</div>
</div>
<script>
function showWorkerDetails(event) {
const workerID = event.target.closest('button').getAttribute('data-worker-id');
// Show modal
var modal = new bootstrap.Modal(document.getElementById('workerDetailsModal'));
// Load worker details
const encodedWorkerId = encodeURIComponent(workerID);
fetch('/api/maintenance/workers/' + encodedWorkerId)
.then(response => response.json())
.then(data => {
const content = document.getElementById('workerDetailsContent');
content.innerHTML = '<div class="row">' +
'<div class="col-md-6">' +
'<h6>Worker Information</h6>' +
'<ul class="list-unstyled">' +
'<li><strong>ID:</strong> ' + data.worker.id + '</li>' +
'<li><strong>Address:</strong> ' + data.worker.address + '</li>' +
'<li><strong>Status:</strong> ' + data.worker.status + '</li>' +
'<li><strong>Max Concurrent:</strong> ' + data.worker.max_concurrent + '</li>' +
'<li><strong>Current Load:</strong> ' + data.worker.current_load + '</li>' +
'</ul>' +
'</div>' +
'<div class="col-md-6">' +
'<h6>Performance Metrics</h6>' +
'<ul class="list-unstyled">' +
'<li><strong>Tasks Completed:</strong> ' + data.performance.tasks_completed + '</li>' +
'<li><strong>Tasks Failed:</strong> ' + data.performance.tasks_failed + '</li>' +
'<li><strong>Success Rate:</strong> ' + data.performance.success_rate.toFixed(1) + '%</li>' +
'<li><strong>Average Task Time:</strong> ' + formatDuration(data.performance.average_task_time) + '</li>' +
'<li><strong>Uptime:</strong> ' + formatDuration(data.performance.uptime) + '</li>' +
'</ul>' +
'</div>' +
'</div>' +
'<hr>' +
'<h6>Current Tasks</h6>' +
(data.current_tasks === null || data.current_tasks.length === 0 ?
'<p class="text-muted">No current tasks</p>' :
data.current_tasks.map(task =>
'<div class="card mb-2">' +
'<div class="card-body py-2">' +
'<div class="d-flex justify-content-between">' +
'<span><strong>' + task.type + '</strong> - Volume ' + task.volume_id + '</span>' +
'<span class="badge bg-info">' + task.status + '</span>' +
'</div>' +
'<small class="text-muted">' + task.reason + '</small>' +
'</div>' +
'</div>'
).join('')
);
modal.show();
})
.catch(error => {
console.error('Error loading worker details:', error);
const content = document.getElementById('workerDetailsContent');
content.innerHTML = '<div class="alert alert-danger">Failed to load worker details</div>';
modal.show();
});
}
function pauseWorker(event) {
const workerID = event.target.closest('button').getAttribute('data-worker-id');
showConfirm(`Are you sure you want to pause worker ${workerID}?`, function() {
const encodedWorkerId = encodeURIComponent(workerID);
fetch('/api/maintenance/workers/' + encodedWorkerId + '/pause', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
location.reload();
} else {
showAlert('Failed to pause worker: ' + data.error, 'error');
}
})
.catch(error => {
console.error('Error pausing worker:', error);
showAlert('Failed to pause worker', 'error');
});
});
}
function formatDuration(nanoseconds) {
const seconds = Math.floor(nanoseconds / 1000000000);
const minutes = Math.floor(seconds / 60);
const hours = Math.floor(minutes / 60);
if (hours > 0) {
return hours + 'h ' + (minutes % 60) + 'm';
} else if (minutes > 0) {
return minutes + 'm ' + (seconds % 60) + 's';
} else {
return seconds + 's';
}
}
</script>
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -1,160 +0,0 @@
package app
import (
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
)
// TaskConfig renders the legacy maintenance task configuration page: a header
// with navigation, a POST form whose inputs arrive pre-rendered as HTML in
// data.ConfigFormHTML, a task information card, and client-side scripts for
// resetting the form and recovering unsaved drafts from localStorage.
templ TaskConfig(data *maintenance.TaskConfigData) {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h2 class="mb-0">
<i class={data.TaskIcon + " me-2"}></i>
{data.TaskName} Configuration
</h2>
<div class="btn-group">
<a href="/maintenance/config" class="btn btn-outline-secondary">
<i class="fas fa-arrow-left me-1"></i>
Back to Configuration
</a>
<a href="/maintenance" class="btn btn-outline-primary">
<i class="fas fa-list me-1"></i>
View Queue
</a>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class={data.TaskIcon + " me-2"}></i>
{data.TaskName} Settings
</h5>
</div>
<div class="card-body">
<p class="text-muted mb-4">{data.Description}</p>
<!-- Task-specific configuration form -->
<form method="POST">
<div class="task-config-form">
<!-- ConfigFormHTML is trusted server-generated markup; templ.Raw bypasses escaping -->
@templ.Raw(string(data.ConfigFormHTML))
</div>
<hr class="my-4">
<div class="d-flex gap-2">
<button type="submit" class="btn btn-primary">
<i class="fas fa-save me-1"></i>
Save Configuration
</button>
<button type="button" class="btn btn-secondary" onclick="resetForm()">
<i class="fas fa-undo me-1"></i>
Reset to Defaults
</button>
<a href="/maintenance/config" class="btn btn-outline-secondary">
<i class="fas fa-times me-1"></i>
Cancel
</a>
</div>
</form>
</div>
</div>
</div>
</div>
<!-- Task Information -->
<div class="row mt-4">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-info-circle me-2"></i>
Task Information
</h5>
</div>
<div class="card-body">
<div class="row">
<div class="col-md-6">
<h6 class="text-muted">Task Type</h6>
<p class="mb-3">
<span class="badge bg-secondary">{string(data.TaskType)}</span>
</p>
</div>
<div class="col-md-6">
<h6 class="text-muted">Display Name</h6>
<p class="mb-3">{data.TaskName}</p>
</div>
</div>
<div class="row">
<div class="col-12">
<h6 class="text-muted">Description</h6>
<p class="mb-0">{data.Description}</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Client-side helpers: form reset and localStorage draft recovery -->
<script>
// Reset every input in the form back to its server-rendered value.
function resetForm() {
showConfirm('Are you sure you want to reset all settings to their default values?', function() {
// Find all form inputs and reset them
const form = document.querySelector('form');
if (form) {
form.reset();
}
});
}
// Auto-save form data to localStorage for recovery
document.addEventListener('DOMContentLoaded', function() {
const form = document.querySelector('form');
if (form) {
// NOTE(review): templ does not appear to interpolate expressions inside
// <script> bodies (compare the raw script emission in the generated file),
// so this likely stays the literal text '{string(data.TaskType)}' at
// runtime. Confirm, and consider passing the task type via a data attribute.
const taskType = '{string(data.TaskType)}';
const storageKey = 'taskConfig_' + taskType;
// Load saved data
const savedData = localStorage.getItem(storageKey);
if (savedData) {
try {
const data = JSON.parse(savedData);
Object.keys(data).forEach(key => {
const input = form.querySelector(`[name="${key}"]`);
if (input) {
if (input.type === 'checkbox') {
input.checked = data[key];
} else {
input.value = data[key];
}
}
});
} catch (e) {
console.warn('Failed to load saved configuration:', e);
}
}
// Save data on input change
form.addEventListener('input', function() {
const formData = new FormData(form);
const data = {};
for (let [key, value] of formData.entries()) {
data[key] = value;
}
localStorage.setItem(storageKey, JSON.stringify(data));
});
// Clear saved data on successful submit
form.addEventListener('submit', function() {
localStorage.removeItem(storageKey);
});
}
});
</script>
}

View File

@@ -1,487 +0,0 @@
package app
import (
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"strings"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)
// taskSchemaToJSON serializes the schema's field list as a JSON object of the
// form {"fields": ...}. A nil schema or a marshalling failure yields "{}" so
// callers always receive valid JSON.
func taskSchemaToJSON(schema *tasks.TaskConfigSchema) string {
	if schema == nil {
		return "{}"
	}
	encoded, marshalErr := json.Marshal(map[string]interface{}{"fields": schema.Fields})
	if marshalErr != nil {
		return "{}"
	}
	return string(encoded)
}
// taskSchemaToBase64JSON renders the schema as JSON and base64-encodes it so
// the payload can sit in an HTML attribute without escaping issues.
func taskSchemaToBase64JSON(schema *tasks.TaskConfigSchema) string {
	return base64.StdEncoding.EncodeToString([]byte(taskSchemaToJSON(schema)))
}
// TaskConfigSchema renders the schema-driven task configuration page: each
// entry in schema.Fields becomes a form input (via TaskConfigField), task
// specific operational notes are shown for vacuum/balance/erasure_coding,
// and the schema itself is shipped to the browser (base64 in a data
// attribute) to drive the client-side "reset to defaults" logic.
templ TaskConfigSchema(data *maintenance.TaskConfigData, schema *tasks.TaskConfigSchema, config interface{}) {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h2 class="mb-0">
<i class={schema.Icon + " me-2"}></i>
{schema.DisplayName} Configuration
</h2>
<div class="btn-group">
<a href="/maintenance/config" class="btn btn-outline-secondary">
<i class="fas fa-arrow-left me-1"></i>
Back to System Config
</a>
</div>
</div>
</div>
</div>
<!-- Configuration Card -->
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-cogs me-2"></i>
Task Configuration
</h5>
<p class="mb-0 text-muted small">{schema.Description}</p>
</div>
<div class="card-body">
<form id="taskConfigForm" method="POST">
<!-- Dynamically render all schema fields in defined order -->
for _, field := range schema.Fields {
@TaskConfigField(field, config)
}
<div class="d-flex gap-2">
<button type="submit" class="btn btn-primary">
<i class="fas fa-save me-1"></i>
Save Configuration
</button>
<button type="button" class="btn btn-secondary" onclick="resetToDefaults()">
<i class="fas fa-undo me-1"></i>
Reset to Defaults
</button>
</div>
</form>
</div>
</div>
</div>
</div>
<!-- Performance Notes Card -->
<div class="row mt-4">
<div class="col-12">
<div class="card">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-info-circle me-2"></i>
Important Notes
</h5>
</div>
<div class="card-body">
<div class="alert alert-info" role="alert">
<!-- Task-specific guidance, keyed on the schema's task name -->
if schema.TaskName == "vacuum" {
<h6 class="alert-heading">Vacuum Operations:</h6>
<p class="mb-2"><strong>Performance:</strong> Vacuum operations are I/O intensive and may impact cluster performance.</p>
<p class="mb-2"><strong>Safety:</strong> Only volumes meeting age and garbage thresholds will be processed.</p>
<p class="mb-0"><strong>Recommendation:</strong> Monitor cluster load and adjust concurrent limits accordingly.</p>
} else if schema.TaskName == "balance" {
<h6 class="alert-heading">Balance Operations:</h6>
<p class="mb-2"><strong>Performance:</strong> Volume balancing involves data movement and can impact cluster performance.</p>
<p class="mb-2"><strong>Safety:</strong> Requires adequate server count to ensure data safety during moves.</p>
<p class="mb-0"><strong>Recommendation:</strong> Run during off-peak hours to minimize impact on production workloads.</p>
} else if schema.TaskName == "erasure_coding" {
<h6 class="alert-heading">Erasure Coding Operations:</h6>
<p class="mb-2"><strong>Performance:</strong> Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.</p>
<p class="mb-2"><strong>Durability:</strong> With { fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) } configuration, can tolerate up to { fmt.Sprintf("%d", erasure_coding.ParityShardsCount) } shard failures.</p>
<p class="mb-0"><strong>Configuration:</strong> Fullness ratio should be between 0.5 and 1.0 (e.g., 0.90 for 90%).</p>
}
</div>
</div>
</div>
</div>
</div>
</div>
<script>
// Restore every schema field to its descriptor default (window.taskConfigSchema
// is populated by the loader script below).
function resetToDefaults() {
showConfirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.', function() {
// Reset form fields to their default values
const form = document.getElementById('taskConfigForm');
const schemaFields = window.taskConfigSchema ? window.taskConfigSchema.fields : {};
Object.keys(schemaFields).forEach(fieldName => {
const field = schemaFields[fieldName];
const element = document.getElementById(fieldName);
if (element && field.default_value !== undefined) {
if (field.input_type === 'checkbox') {
element.checked = field.default_value;
} else if (field.input_type === 'interval') {
// Handle interval fields with value and unit
const valueElement = document.getElementById(fieldName + '_value');
const unitElement = document.getElementById(fieldName + '_unit');
if (valueElement && unitElement && field.default_value) {
const defaultSeconds = field.default_value;
const { value, unit } = convertSecondsToTaskIntervalValueUnit(defaultSeconds);
valueElement.value = value;
unitElement.value = unit;
}
} else {
element.value = field.default_value;
}
}
});
});
}
// Pick the largest unit (days > hours > minutes) that divides the interval evenly.
function convertSecondsToTaskIntervalValueUnit(totalSeconds) {
if (totalSeconds === 0) {
return { value: 0, unit: 'minutes' };
}
// Check if it's evenly divisible by days
if (totalSeconds % (24 * 3600) === 0) {
return { value: totalSeconds / (24 * 3600), unit: 'days' };
}
// Check if it's evenly divisible by hours
if (totalSeconds % 3600 === 0) {
return { value: totalSeconds / 3600, unit: 'hours' };
}
// Default to minutes
return { value: totalSeconds / 60, unit: 'minutes' };
}
// Store schema data for JavaScript access (moved to after div is created)
</script>
<!-- Hidden element to store schema data -->
<div data-task-schema={ taskSchemaToBase64JSON(schema) } style="display: none;"></div>
<script>
// Load schema data now that the div exists
const base64Data = document.querySelector('[data-task-schema]').getAttribute('data-task-schema');
const jsonStr = atob(base64Data);
window.taskConfigSchema = JSON.parse(jsonStr);
</script>
}
// TaskConfigField renders a single task configuration field based on schema with typed field lookup.
// Dispatches on field.InputType: "interval" (number + unit dropdown backed by
// an int32 seconds value), "checkbox", "text", and a numeric fallback for
// everything else. Current values are read from the config struct via the
// getTaskConfig*Field helpers, keyed by the field's JSON name.
templ TaskConfigField(field *config.Field, config interface{}) {
if field.InputType == "interval" {
<!-- Interval field with number input + unit dropdown -->
<div class="mb-3">
<label for={ field.JSONName } class="form-label">
{ field.DisplayName }
if field.Required {
<span class="text-danger">*</span>
}
</label>
<div class="input-group">
<!-- Stored seconds are converted to the display unit chosen below -->
<input
type="number"
class="form-control"
id={ field.JSONName + "_value" }
name={ field.JSONName + "_value" }
value={ fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getTaskConfigInt32Field(config, field.JSONName))) }
step="1"
min="1"
if field.Required {
required
}
/>
<select
class="form-select"
id={ field.JSONName + "_unit" }
name={ field.JSONName + "_unit" }
style="max-width: 120px;"
if field.Required {
required
}
>
<option
value="minutes"
if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "minutes" {
selected
}
>
Minutes
</option>
<option
value="hours"
if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "hours" {
selected
}
>
Hours
</option>
<option
value="days"
if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "days" {
selected
}
>
Days
</option>
</select>
</div>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
} else if field.InputType == "checkbox" {
<!-- Checkbox field -->
<div class="mb-3">
<div class="form-check form-switch">
<input
class="form-check-input"
type="checkbox"
id={ field.JSONName }
name={ field.JSONName }
value="on"
if getTaskConfigBoolField(config, field.JSONName) {
checked
}
/>
<label class="form-check-label" for={ field.JSONName }>
<strong>{ field.DisplayName }</strong>
</label>
</div>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
} else if field.InputType == "text" {
<!-- Text field -->
<div class="mb-3">
<label for={ field.JSONName } class="form-label">
{ field.DisplayName }
if field.Required {
<span class="text-danger">*</span>
}
</label>
<input
type="text"
class="form-control"
id={ field.JSONName }
name={ field.JSONName }
value={ getTaskConfigStringField(config, field.JSONName) }
placeholder={ field.Placeholder }
if field.Required {
required
}
/>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
} else {
<!-- Number field (fallback for any other input type) -->
<div class="mb-3">
<label for={ field.JSONName } class="form-label">
{ field.DisplayName }
if field.Required {
<span class="text-danger">*</span>
}
</label>
<input
type="number"
class="form-control"
id={ field.JSONName }
name={ field.JSONName }
value={ fmt.Sprintf("%.6g", getTaskConfigFloatField(config, field.JSONName)) }
placeholder={ field.Placeholder }
if field.MinValue != nil {
min={ fmt.Sprintf("%v", field.MinValue) }
}
if field.MaxValue != nil {
max={ fmt.Sprintf("%v", field.MaxValue) }
}
step={ getTaskNumberStep(field) }
if field.Required {
required
}
/>
if field.Description != "" {
<div class="form-text text-muted">{ field.Description }</div>
}
</div>
}
}
// Typed field getters for task configs - avoiding interface{} where possible

// getTaskConfigBoolField looks up fieldName (matched against `json` struct
// tags) on the config struct and returns its boolean value. It returns false
// when the field is absent or not a bool.
func getTaskConfigBoolField(config interface{}, fieldName string) bool {
	// The previous switch on fieldName had byte-identical bodies for the
	// "enabled" case and the default case, so a single reflection-based
	// lookup is equivalent and simpler.
	if value := getTaskFieldValue(config, fieldName); value != nil {
		if boolVal, ok := value.(bool); ok {
			return boolVal
		}
	}
	return false
}
// getTaskConfigInt32Field looks up fieldName (matched against `json` struct
// tags) on the config struct and returns its value as an int32. All integer
// widths are converted, and float64 is accepted as well so values decoded
// from JSON (where every number arrives as float64) are handled uniformly.
// Missing or non-numeric fields yield 0.
func getTaskConfigInt32Field(config interface{}, fieldName string) int32 {
	// The previous switch on fieldName rejected float64 for
	// "scan_interval_seconds" and "max_concurrent" while accepting it for
	// every other field; unifying the branches fixes silent zeroes for
	// JSON-decoded configs and keeps behavior for all other inputs.
	if value := getTaskFieldValue(config, fieldName); value != nil {
		switch v := value.(type) {
		case int32:
			return v
		case int:
			return int32(v)
		case int64:
			return int32(v)
		case float64:
			return int32(v)
		}
	}
	return 0
}
// getTaskConfigFloatField looks up fieldName (matched against `json` struct
// tags) on the config struct and returns it as a float64, converting from any
// numeric type. Missing or non-numeric fields yield 0.0.
func getTaskConfigFloatField(config interface{}, fieldName string) float64 {
	value := getTaskFieldValue(config, fieldName)
	if value == nil {
		return 0.0
	}
	switch v := value.(type) {
	case float64:
		return v
	case float32:
		return float64(v)
	case int:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	default:
		return 0.0
	}
}
// getTaskConfigStringField looks up fieldName (matched against `json` struct
// tags) on the config struct and returns it formatted for form display:
// strings pass through, numbers are formatted ("%d" for integers, "%.6g" for
// floats), and anything else — including a missing field — becomes "".
func getTaskConfigStringField(config interface{}, fieldName string) string {
	value := getTaskFieldValue(config, fieldName)
	if value == nil {
		return ""
	}
	switch v := value.(type) {
	case string:
		return v
	case int:
		return fmt.Sprintf("%d", v)
	case int32:
		return fmt.Sprintf("%d", v)
	case int64:
		return fmt.Sprintf("%d", v)
	case float64:
		return fmt.Sprintf("%.6g", v)
	case float32:
		return fmt.Sprintf("%.6g", v)
	default:
		return ""
	}
}
// getTaskNumberStep returns the HTML number-input "step" attribute for the
// field: "any" for float fields, "1" for everything else.
func getTaskNumberStep(field *config.Field) string {
	switch field.Type {
	case config.FieldTypeFloat:
		return "any"
	default:
		return "1"
	}
}
// getTaskFieldValue resolves fieldName against the struct's `json` tags and
// returns the matching field's value. Pointers are dereferenced once,
// anonymous embedded structs are searched recursively (before their own tags
// are consulted), and nil is returned when config is not a struct or no
// tagged field matches.
func getTaskFieldValue(config interface{}, fieldName string) interface{} {
	if config == nil {
		return nil
	}
	v := reflect.ValueOf(config)
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil
	}
	t := v.Type()
	for i := 0; i < v.NumField(); i++ {
		fv, ft := v.Field(i), t.Field(i)
		// Descend into anonymous embedded structs before the tag check so
		// promoted fields (e.g. from a BaseConfig) are found.
		if ft.Anonymous && fv.Kind() == reflect.Struct {
			if nested := getTaskFieldValue(fv.Interface(), fieldName); nested != nil {
				return nested
			}
			continue
		}
		tag := ft.Tag.Get("json")
		if tag == "" {
			continue
		}
		// Strip options such as ",omitempty" (only when a name precedes the comma).
		if idx := strings.Index(tag, ","); idx > 0 {
			tag = tag[:idx]
		}
		if tag == fieldName {
			return fv.Interface()
		}
	}
	return nil
}

View File

@@ -1,948 +0,0 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
package app
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"reflect"
"strings"
)
// Helper function to convert task schema to JSON string
// NOTE(review): this is templ-generated output of task_config_schema.templ
// (see the "Code generated by templ - DO NOT EDIT" header); change the .templ
// source and regenerate rather than editing here. Returns "{}" for a nil
// schema or on marshalling failure.
func taskSchemaToJSON(schema *tasks.TaskConfigSchema) string {
if schema == nil {
return "{}"
}
data := map[string]interface{}{
"fields": schema.Fields,
}
jsonBytes, err := json.Marshal(data)
if err != nil {
return "{}"
}
return string(jsonBytes)
}
// Helper function to base64 encode the JSON to avoid HTML escaping issues
// NOTE(review): templ-generated code; edit task_config_schema.templ and
// regenerate rather than modifying this file.
func taskSchemaToBase64JSON(schema *tasks.TaskConfigSchema) string {
jsonStr := taskSchemaToJSON(schema)
return base64.StdEncoding.EncodeToString([]byte(jsonStr))
}
// TaskConfigSchema renders the schema-driven task configuration page.
// NOTE(review): templ-generated code for task_config_schema.templ (see the
// file's "DO NOT EDIT" header); the statement sequence below is machine
// emitted, so change the .templ source and regenerate instead of editing.
func TaskConfigSchema(data *maintenance.TaskConfigData, schema *tasks.TaskConfigSchema, config interface{}) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 = []any{schema.Icon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var2).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(schema.DisplayName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 47, Col: 43}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration</h2><div class=\"btn-group\"><a href=\"/maintenance/config\" class=\"btn btn-outline-secondary\"><i class=\"fas fa-arrow-left me-1\"></i> Back to System Config</a></div></div></div></div><!-- Configuration Card --><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-cogs me-2\"></i> Task Configuration</h5><p class=\"mb-0 text-muted small\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(schema.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 68, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</p></div><div class=\"card-body\"><form id=\"taskConfigForm\" method=\"POST\"><!-- Dynamically render all schema fields in defined order -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, field := range schema.Fields {
templ_7745c5c3_Err = TaskConfigField(field, config).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<div class=\"d-flex gap-2\"><button type=\"submit\" class=\"btn btn-primary\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetToDefaults()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button></div></form></div></div></div></div><!-- Performance Notes Card --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-info-circle me-2\"></i> Important Notes</h5></div><div class=\"card-body\"><div class=\"alert alert-info\" role=\"alert\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if schema.TaskName == "vacuum" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "<h6 class=\"alert-heading\">Vacuum Operations:</h6><p class=\"mb-2\"><strong>Performance:</strong> Vacuum operations are I/O intensive and may impact cluster performance.</p><p class=\"mb-2\"><strong>Safety:</strong> Only volumes meeting age and garbage thresholds will be processed.</p><p class=\"mb-0\"><strong>Recommendation:</strong> Monitor cluster load and adjust concurrent limits accordingly.</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if schema.TaskName == "balance" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<h6 class=\"alert-heading\">Balance Operations:</h6><p class=\"mb-2\"><strong>Performance:</strong> Volume balancing involves data movement and can impact cluster performance.</p><p class=\"mb-2\"><strong>Safety:</strong> Requires adequate server count to ensure data safety during moves.</p><p class=\"mb-0\"><strong>Recommendation:</strong> Run during off-peak hours to minimize impact on production workloads.</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if schema.TaskName == "erasure_coding" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "<h6 class=\"alert-heading\">Erasure Coding Operations:</h6><p class=\"mb-2\"><strong>Performance:</strong> Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.</p><p class=\"mb-2\"><strong>Durability:</strong> With ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 118, Col: 170}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " configuration, can tolerate up to ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.ParityShardsCount))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 118, Col: 260}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " shard failures.</p><p class=\"mb-0\"><strong>Configuration:</strong> Fullness ratio should be between 0.5 and 1.0 (e.g., 0.90 for 90%).</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</div></div></div></div></div></div><script>\n    function resetToDefaults() {\n        showConfirm('Are you sure you want to reset to default configuration? This will overwrite your current settings.', function() {\n            // Reset form fields to their default values\n            const form = document.getElementById('taskConfigForm');\n            const schemaFields = window.taskConfigSchema ? window.taskConfigSchema.fields : {};\n            \n            Object.keys(schemaFields).forEach(fieldName => {\n                const field = schemaFields[fieldName];\n                const element = document.getElementById(fieldName);\n                \n                if (element && field.default_value !== undefined) {\n                    if (field.input_type === 'checkbox') {\n                        element.checked = field.default_value;\n                    } else if (field.input_type === 'interval') {\n                        // Handle interval fields with value and unit\n                        const valueElement = document.getElementById(fieldName + '_value');\n                        const unitElement = document.getElementById(fieldName + '_unit');\n                        if (valueElement && unitElement && field.default_value) {\n                            const defaultSeconds = field.default_value;\n                            const { value, unit } = convertSecondsToTaskIntervalValueUnit(defaultSeconds);\n                            valueElement.value = value;\n                            unitElement.value = unit;\n                        }\n                    } else {\n                        element.value = field.default_value;\n                    }\n                }\n            });\n        });\n    }\n\n    function convertSecondsToTaskIntervalValueUnit(totalSeconds) {\n        if (totalSeconds === 0) {\n            return { value: 0, unit: 'minutes' };\n        }\n\n        // Check if it's evenly divisible by days\n        if (totalSeconds % (24 * 3600) === 0) {\n            return { value: totalSeconds / (24 * 3600), unit: 'days' };\n        }\n\n        // Check if it's evenly divisible by hours\n        if (totalSeconds % 3600 === 0) {\n            return { value: totalSeconds / 3600, unit: 'hours' };\n        }\n\n        // Default to minutes\n        return { value: totalSeconds / 60, unit: 'minutes' };\n    }\n\n    // Store schema data for JavaScript access (moved to after div is created)\n    </script><!-- Hidden element to store schema data --><div data-task-schema=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(taskSchemaToBase64JSON(schema))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 183, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "\" style=\"display: none;\"></div><script>\n        // Load schema data now that the div exists\n        const base64Data = document.querySelector('[data-task-schema]').getAttribute('data-task-schema');\n        const jsonStr = atob(base64Data);\n        window.taskConfigSchema = JSON.parse(jsonStr);\n    </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
// TaskConfigField renders a single task configuration field based on schema with typed field lookup.
// The control emitted depends on field.InputType:
//   - "interval": a number input paired with a minutes/hours/days unit dropdown
//   - "checkbox": a bootstrap form-switch
//   - "text":     a plain text input
//   - otherwise:  a generic number input with optional min/max and a type-derived step
// Current values are read from config via the reflective getTaskConfig* helpers,
// keyed by the schema field's JSON name.
//
// NOTE(review): this is templ-generated code (source: weed/admin/view/app/task_config_schema.templ);
// prefer editing the .templ source and regenerating over hand-editing this function.
func TaskConfigField(field *config.Field, config interface{}) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
// templ boilerplate: acquire a buffered writer; released (and its error propagated) on return.
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
// templ boilerplate: capture and clear any child components passed via context (unused here).
templ_7745c5c3_Var9 := templ.GetChildren(ctx)
if templ_7745c5c3_Var9 == nil {
templ_7745c5c3_Var9 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
// Dispatch on the schema input type; each branch emits one complete <div class="mb-3"> form group.
if field.InputType == "interval" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<!-- Interval field with number input + unit dropdown --> <div class=\"mb-3\"><label for=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 198, Col: 39}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "\" class=\"form-label\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 199, Col: 35}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
// Required fields get a red asterisk next to the label.
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<span class=\"text-danger\">*</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</label><div class=\"input-group\"><input type=\"number\" class=\"form-control\" id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_value")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 208, Col: 50}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\" name=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_value")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 209, Col: 52}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
// The stored value is seconds; convert to the display unit's magnitude for the number box.
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f", components.ConvertInt32SecondsToDisplayValue(getTaskConfigInt32Field(config, field.JSONName))))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 210, Col: 142}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "\" step=\"1\" min=\"1\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, " required")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "> <select class=\"form-select\" id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_unit")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 219, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\" name=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName + "_unit")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 220, Col: 51}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "\" style=\"max-width: 120px;\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, " required")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
// Pre-select the unit option matching the stored value's natural display unit.
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "><option value=\"minutes\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "minutes" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, ">Minutes</option> <option value=\"hours\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "hours" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, ">Hours</option> <option value=\"days\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if components.GetInt32DisplayUnit(getTaskConfigInt32Field(config, field.JSONName)) == "days" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " selected")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, ">Days</option></select></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
// Optional help text under the control.
if field.Description != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<div class=\"form-text text-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 253, Col: 69}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if field.InputType == "checkbox" { // bootstrap form-switch branch
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<!-- Checkbox field --> <div class=\"mb-3\"><div class=\"form-check form-switch\"><input class=\"form-check-input\" type=\"checkbox\" id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 263, Col: 39}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "\" name=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 264, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "\" value=\"on\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if getTaskConfigBoolField(config, field.JSONName) {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " checked")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "> <label class=\"form-check-label\" for=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 270, Col: 68}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "\"><strong>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 271, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</strong></label></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Description != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<div class=\"form-text text-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 275, Col: 69}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else if field.InputType == "text" { // plain text input branch
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<!-- Text field --> <div class=\"mb-3\"><label for=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 281, Col: 39}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "\" class=\"form-label\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 282, Col: 35}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<span class=\"text-danger\">*</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</label> <input type=\"text\" class=\"form-control\" id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 290, Col: 35}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "\" name=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 291, Col: 37}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(getTaskConfigStringField(config, field.JSONName))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 292, Col: 72}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "\" placeholder=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(field.Placeholder)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 293, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, " required")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Description != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<div class=\"form-text text-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 299, Col: 69}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else { // fallback: generic numeric input
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "<!-- Number field --> <div class=\"mb-3\"><label for=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var30 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 305, Col: 39}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "\" class=\"form-label\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var31 string
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(field.DisplayName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 306, Col: 35}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, " ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "<span class=\"text-danger\">*</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "</label> <input type=\"number\" class=\"form-control\" id=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var32 string
templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 314, Col: 35}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "\" name=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var33 string
templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(field.JSONName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 315, Col: 37}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var34 string
templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.6g", getTaskConfigFloatField(config, field.JSONName)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 316, Col: 92}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "\" placeholder=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var35 string
templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(field.Placeholder)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 317, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
// min/max attributes are emitted only when the schema declares bounds.
if field.MinValue != nil {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, " min=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var36 string
templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%v", field.MinValue))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 319, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
if field.MaxValue != nil {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, " max=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var37 string
templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%v", field.MaxValue))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 322, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, " step=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var38 string
templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(getTaskNumberStep(field))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 324, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Required {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, " required")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if field.Description != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "<div class=\"form-text text-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var39 string
templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_schema.templ`, Line: 330, Col: 69}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "</div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
return nil
})
}
// Typed field getters for task configs - avoiding interface{} where possible
// getTaskConfigBoolField returns the boolean value of the config field whose
// JSON tag name is fieldName, or false when the field is missing or not a bool.
func getTaskConfigBoolField(config interface{}, fieldName string) bool {
	// The previous switch special-cased "enabled", but both branches were
	// byte-identical; a single reflective lookup covers every field.
	if value := getTaskFieldValue(config, fieldName); value != nil {
		if boolVal, ok := value.(bool); ok {
			return boolVal
		}
	}
	return false
}
// getTaskConfigInt32Field returns the int32 value of the config field whose
// JSON tag name is fieldName. Signed integer widths and float64 (the type
// JSON numbers decode to) are converted; missing or unsupported types yield 0.
func getTaskConfigInt32Field(config interface{}, fieldName string) int32 {
	// The previous switch duplicated the conversion per field name, and the
	// "scan_interval_seconds"/"max_concurrent" branch silently dropped float64
	// values that the default branch accepted. A single superset conversion
	// handles every field consistently.
	if value := getTaskFieldValue(config, fieldName); value != nil {
		switch v := value.(type) {
		case int32:
			return v
		case int:
			return int32(v)
		case int64:
			return int32(v)
		case float64:
			return int32(v)
		}
	}
	return 0
}
// getTaskConfigFloatField returns the float64 value of the config field whose
// JSON tag name is fieldName, converting from any supported numeric type.
// Missing fields or non-numeric values yield 0.0.
func getTaskConfigFloatField(config interface{}, fieldName string) float64 {
	value := getTaskFieldValue(config, fieldName)
	if value == nil {
		return 0.0
	}
	switch number := value.(type) {
	case float64:
		return number
	case float32:
		return float64(number)
	case int:
		return float64(number)
	case int32:
		return float64(number)
	case int64:
		return float64(number)
	default:
		return 0.0
	}
}
// getTaskConfigStringField returns the string representation of the config
// field whose JSON tag name is fieldName. Numeric values are rendered as
// strings for form display; missing fields or unsupported types yield "".
func getTaskConfigStringField(config interface{}, fieldName string) string {
	value := getTaskFieldValue(config, fieldName)
	if value == nil {
		return ""
	}
	switch typed := value.(type) {
	case string:
		return typed
	case int:
		return fmt.Sprintf("%d", typed)
	case int32:
		return fmt.Sprintf("%d", typed)
	case int64:
		return fmt.Sprintf("%d", typed)
	case float64:
		return fmt.Sprintf("%.6g", typed)
	case float32:
		return fmt.Sprintf("%.6g", typed)
	default:
		return ""
	}
}
// getTaskNumberStep returns the HTML number-input "step" attribute value for
// a schema field: "any" for float-typed fields, "1" for everything else.
func getTaskNumberStep(field *config.Field) string {
	switch field.Type {
	case config.FieldTypeFloat:
		return "any"
	default:
		return "1"
	}
}
func getTaskFieldValue(config interface{}, fieldName string) interface{} {
if config == nil {
return nil
}
// Use reflection to get the field value from the config struct
configValue := reflect.ValueOf(config)
if configValue.Kind() == reflect.Ptr {
configValue = configValue.Elem()
}
if configValue.Kind() != reflect.Struct {
return nil
}
configType := configValue.Type()
for i := 0; i < configValue.NumField(); i++ {
field := configValue.Field(i)
fieldType := configType.Field(i)
// Handle embedded structs recursively (before JSON tag check)
if field.Kind() == reflect.Struct && fieldType.Anonymous {
if value := getTaskFieldValue(field.Interface(), fieldName); value != nil {
return value
}
continue
}
// Get JSON tag name
jsonTag := fieldType.Tag.Get("json")
if jsonTag == "" {
continue
}
// Remove options like ",omitempty"
if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 {
jsonTag = jsonTag[:commaIdx]
}
// Check if this is the field we're looking for
if jsonTag == fieldName {
return field.Interface()
}
}
return nil
}
var _ = templruntime.GeneratedTemplate

View File

@@ -1,232 +0,0 @@ (diff hunk header: the 232-line test file reproduced below was deleted in this change)
package app
import (
"testing"
)
// Test structs that mirror the actual configuration structure.
// TestBaseConfigForTemplate stands in for the embedded base task config:
// an enable flag plus scheduler scan/concurrency settings, exposed to
// getTaskFieldValue through their JSON tag names.
type TestBaseConfigForTemplate struct {
Enabled bool `json:"enabled"`
ScanIntervalSeconds int `json:"scan_interval_seconds"`
MaxConcurrent int `json:"max_concurrent"`
}
// TestTaskConfigForTemplate embeds the base config and adds task-specific
// fields, exercising promoted-field lookup through the embedded struct.
type TestTaskConfigForTemplate struct {
TestBaseConfigForTemplate
TaskSpecificField float64 `json:"task_specific_field"`
AnotherSpecificField string `json:"another_specific_field"`
}
// TestGetTaskFieldValue_EmbeddedStructFields verifies that fields promoted
// from the embedded base config and the outer struct's own fields are all
// resolvable by their JSON tag names.
func TestGetTaskFieldValue_EmbeddedStructFields(t *testing.T) {
	cfg := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 2400,
			MaxConcurrent:       5,
		},
		TaskSpecificField:    0.18,
		AnotherSpecificField: "test_value",
	}
	cases := []struct {
		fieldName     string
		expectedValue interface{}
		description   string
	}{
		{"enabled", true, "BaseConfig boolean field"},
		{"scan_interval_seconds", 2400, "BaseConfig integer field"},
		{"max_concurrent", 5, "BaseConfig integer field"},
		{"task_specific_field", 0.18, "Task-specific float field"},
		{"another_specific_field", "test_value", "Task-specific string field"},
	}
	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			got := getTaskFieldValue(cfg, tc.fieldName)
			if got != tc.expectedValue {
				t.Errorf("Field %s: expected %v (%T), got %v (%T)",
					tc.fieldName, tc.expectedValue, tc.expectedValue, got, got)
			}
		})
	}
}
// TestGetTaskFieldValue_NonExistentField verifies that a JSON name carried by
// no field resolves to nil.
func TestGetTaskFieldValue_NonExistentField(t *testing.T) {
	cfg := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
	}
	if got := getTaskFieldValue(cfg, "non_existent_field"); got != nil {
		t.Errorf("Expected nil for non-existent field, got %v", got)
	}
}
// TestGetTaskFieldValue_NilConfig verifies that a typed nil pointer config is
// handled gracefully and yields nil rather than panicking.
func TestGetTaskFieldValue_NilConfig(t *testing.T) {
	var cfg *TestTaskConfigForTemplate
	if got := getTaskFieldValue(cfg, "enabled"); got != nil {
		t.Errorf("Expected nil for nil config, got %v", got)
	}
}
// TestGetTaskFieldValue_EmptyStruct verifies that zero values of every field
// type are extracted as-is (not collapsed to nil).
func TestGetTaskFieldValue_EmptyStruct(t *testing.T) {
	cfg := &TestTaskConfigForTemplate{}
	cases := []struct {
		fieldName     string
		expectedValue interface{}
		description   string
	}{
		{"enabled", false, "Zero value boolean"},
		{"scan_interval_seconds", 0, "Zero value integer"},
		{"max_concurrent", 0, "Zero value integer"},
		{"task_specific_field", 0.0, "Zero value float"},
		{"another_specific_field", "", "Zero value string"},
	}
	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			got := getTaskFieldValue(cfg, tc.fieldName)
			if got != tc.expectedValue {
				t.Errorf("Field %s: expected %v (%T), got %v (%T)",
					tc.fieldName, tc.expectedValue, tc.expectedValue, got, got)
			}
		})
	}
}
// TestGetTaskFieldValue_NonStructConfig verifies that a non-struct config
// value yields nil.
func TestGetTaskFieldValue_NonStructConfig(t *testing.T) {
	var cfg interface{} = "not a struct"
	if got := getTaskFieldValue(cfg, "enabled"); got != nil {
		t.Errorf("Expected nil for non-struct config, got %v", got)
	}
}
// TestGetTaskFieldValue_PointerToStruct verifies that a pointer to a struct
// is dereferenced and its fields resolved correctly.
func TestGetTaskFieldValue_PointerToStruct(t *testing.T) {
	cfg := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             false,
			ScanIntervalSeconds: 900,
			MaxConcurrent:       2,
		},
		TaskSpecificField: 0.35,
	}
	if got := getTaskFieldValue(cfg, "enabled"); got != false {
		t.Errorf("Expected false for enabled field, got %v", got)
	}
	if got := getTaskFieldValue(cfg, "scan_interval_seconds"); got != 900 {
		t.Errorf("Expected 900 for scan_interval_seconds field, got %v", got)
	}
}
// TestGetTaskFieldValue_FieldsWithJSONOmitempty verifies that fields tagged
// with ",omitempty" are still located by their base JSON name, and that
// embedded base fields remain reachable alongside them.
func TestGetTaskFieldValue_FieldsWithJSONOmitempty(t *testing.T) {
	type TestConfigWithOmitempty struct {
		TestBaseConfigForTemplate
		OptionalField string `json:"optional_field,omitempty"`
	}
	cfg := &TestConfigWithOmitempty{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 1200,
			MaxConcurrent:       4,
		},
		OptionalField: "optional_value",
	}
	// The omitempty option must not hide the field from lookup.
	if got := getTaskFieldValue(cfg, "optional_field"); got != "optional_value" {
		t.Errorf("Expected 'optional_value' for optional_field, got %v", got)
	}
	// Embedded base fields must still resolve.
	if got := getTaskFieldValue(cfg, "enabled"); got != true {
		t.Errorf("Expected true for enabled field, got %v", got)
	}
}
// TestGetTaskFieldValue_DeepEmbedding verifies that fields are resolved
// through multiple levels of struct embedding (top, middle, and deep).
func TestGetTaskFieldValue_DeepEmbedding(t *testing.T) {
	type DeepBaseConfig struct {
		DeepField string `json:"deep_field"`
	}
	type MiddleConfig struct {
		DeepBaseConfig
		MiddleField int `json:"middle_field"`
	}
	type TopConfig struct {
		MiddleConfig
		TopField bool `json:"top_field"`
	}
	cfg := &TopConfig{
		MiddleConfig: MiddleConfig{
			DeepBaseConfig: DeepBaseConfig{DeepField: "deep_value"},
			MiddleField:    123,
		},
		TopField: true,
	}
	// Every embedding level should be reachable by its JSON tag.
	if got := getTaskFieldValue(cfg, "deep_field"); got != "deep_value" {
		t.Errorf("Expected 'deep_value' for deep_field, got %v", got)
	}
	if got := getTaskFieldValue(cfg, "middle_field"); got != 123 {
		t.Errorf("Expected 123 for middle_field, got %v", got)
	}
	if got := getTaskFieldValue(cfg, "top_field"); got != true {
		t.Errorf("Expected true for top_field, got %v", got)
	}
}
// BenchmarkGetTaskFieldValue measures lookup cost on a fully populated
// config, exercising both an embedded base field and a task-specific field
// each iteration to keep performance regressions visible.
func BenchmarkGetTaskFieldValue(b *testing.B) {
	cfg := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "benchmark_test",
	}
	b.ResetTimer() // exclude fixture construction from the timed region
	for n := 0; n < b.N; n++ {
		_ = getTaskFieldValue(cfg, "enabled")
		_ = getTaskFieldValue(cfg, "task_specific_field")
	}
}

View File

@@ -1,174 +0,0 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
package app
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
)
func TaskConfig(data *maintenance.TaskConfigData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 = []any{data.TaskIcon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var2).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 14, Col: 38}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration</h2><div class=\"btn-group\"><a href=\"/maintenance/config\" class=\"btn btn-outline-secondary\"><i class=\"fas fa-arrow-left me-1\"></i> Back to Configuration</a> <a href=\"/maintenance\" class=\"btn btn-outline-primary\"><i class=\"fas fa-list me-1\"></i> View Queue</a></div></div></div></div><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 = []any{data.TaskIcon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var5...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var5).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 36, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " Settings</h5></div><div class=\"card-body\"><p class=\"text-muted mb-4\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 40, Col: 68}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "</p><!-- Task-specific configuration form --><form method=\"POST\"><div class=\"task-config-form\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templ.Raw(string(data.ConfigFormHTML)).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</div><hr class=\"my-4\"><div class=\"d-flex gap-2\"><button type=\"submit\" class=\"btn btn-primary\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-secondary\" onclick=\"resetForm()\"><i class=\"fas fa-undo me-1\"></i> Reset to Defaults</button> <a href=\"/maintenance/config\" class=\"btn btn-outline-secondary\"><i class=\"fas fa-times me-1\"></i> Cancel</a></div></form></div></div></div></div><!-- Task Information --><div class=\"row mt-4\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-header\"><h5 class=\"mb-0\"><i class=\"fas fa-info-circle me-2\"></i> Task Information</h5></div><div class=\"card-body\"><div class=\"row\"><div class=\"col-md-6\"><h6 class=\"text-muted\">Task Type</h6><p class=\"mb-3\"><span class=\"badge bg-secondary\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(string(data.TaskType))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 85, Col: 91}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</span></p></div><div class=\"col-md-6\"><h6 class=\"text-muted\">Display Name</h6><p class=\"mb-3\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 90, Col: 62}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</p></div></div><div class=\"row\"><div class=\"col-12\"><h6 class=\"text-muted\">Description</h6><p class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config.templ`, Line: 96, Col: 65}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</p></div></div></div></div></div></div></div><script>\n function resetForm() {\n showConfirm('Are you sure you want to reset all settings to their default values?', function() {\n // Find all form inputs and reset them\n const form = document.querySelector('form');\n if (form) {\n form.reset();\n }\n });\n }\n\n // Auto-save form data to localStorage for recovery\n document.addEventListener('DOMContentLoaded', function() {\n const form = document.querySelector('form');\n if (form) {\n const taskType = '{string(data.TaskType)}';\n const storageKey = 'taskConfig_' + taskType;\n\n // Load saved data\n const savedData = localStorage.getItem(storageKey);\n if (savedData) {\n try {\n const data = JSON.parse(savedData);\n Object.keys(data).forEach(key => {\n const input = form.querySelector(`[name=\"${key}\"]`);\n if (input) {\n if (input.type === 'checkbox') {\n input.checked = data[key];\n } else {\n input.value = data[key];\n }\n }\n });\n } catch (e) {\n console.warn('Failed to load saved configuration:', e);\n }\n }\n\n // Save data on input change\n form.addEventListener('input', function() {\n const formData = new FormData(form);\n const data = {};\n for (let [key, value] of formData.entries()) {\n data[key] = value;\n }\n localStorage.setItem(storageKey, JSON.stringify(data));\n });\n\n // Clear saved data on successful submit\n form.addEventListener('submit', function() {\n localStorage.removeItem(storageKey);\n });\n }\n });\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
var _ = templruntime.GeneratedTemplate

View File

@@ -1,160 +0,0 @@
package app
import (
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
)
// TaskConfigTemplData represents data for templ-based task configuration
type TaskConfigTemplData struct {
TaskType maintenance.MaintenanceTaskType
TaskName string
TaskIcon string
Description string
ConfigSections []components.ConfigSectionData
}
templ TaskConfigTempl(data *TaskConfigTemplData) {
<div class="container-fluid">
<div class="row mb-4">
<div class="col-12">
<div class="d-flex justify-content-between align-items-center">
<h2 class="mb-0">
<i class={data.TaskIcon + " me-2"}></i>
{data.TaskName} Configuration
</h2>
<div class="btn-group">
<a href="/maintenance/config" class="btn btn-outline-secondary">
<i class="fas fa-arrow-left me-1"></i>
Back to Configuration
</a>
<a href="/maintenance/queue" class="btn btn-outline-info">
<i class="fas fa-list me-1"></i>
View Queue
</a>
</div>
</div>
</div>
</div>
<div class="row mb-4">
<div class="col-12">
<div class="alert alert-info" role="alert">
<i class="fas fa-info-circle me-2"></i>
{data.Description}
</div>
</div>
</div>
<form method="POST" class="needs-validation" novalidate>
<!-- Render all configuration sections -->
for _, section := range data.ConfigSections {
@components.ConfigSection(section)
}
<!-- Form actions -->
<div class="row">
<div class="col-12">
<div class="card">
<div class="card-body">
<div class="d-flex justify-content-between">
<div>
<button type="submit" class="btn btn-primary">
<i class="fas fa-save me-1"></i>
Save Configuration
</button>
<button type="button" class="btn btn-outline-secondary ms-2" onclick="resetForm()">
<i class="fas fa-undo me-1"></i>
Reset
</button>
</div>
<div>
<button type="button" class="btn btn-outline-info" onclick="testConfiguration()">
<i class="fas fa-play me-1"></i>
Test Configuration
</button>
</div>
</div>
</div>
</div>
</div>
</div>
</form>
</div>
<script>
// Form validation
(function() {
'use strict';
window.addEventListener('load', function() {
var forms = document.getElementsByClassName('needs-validation');
var validation = Array.prototype.filter.call(forms, function(form) {
form.addEventListener('submit', function(event) {
if (form.checkValidity() === false) {
event.preventDefault();
event.stopPropagation();
}
form.classList.add('was-validated');
}, false);
});
}, false);
})();
// Auto-save functionality
let autoSaveTimeout;
function autoSave() {
clearTimeout(autoSaveTimeout);
autoSaveTimeout = setTimeout(function() {
const formData = new FormData(document.querySelector('form'));
localStorage.setItem('task_config_' + '{data.TaskType}', JSON.stringify(Object.fromEntries(formData)));
}, 1000);
}
// Add auto-save listeners to all form inputs
document.addEventListener('DOMContentLoaded', function() {
const form = document.querySelector('form');
if (form) {
form.addEventListener('input', autoSave);
form.addEventListener('change', autoSave);
}
});
// Reset form function
function resetForm() {
showConfirm('Are you sure you want to reset all changes?', function() {
location.reload();
});
}
// Test configuration function
function testConfiguration() {
const formData = new FormData(document.querySelector('form'));
// Show loading state
const testBtn = document.querySelector('button[onclick="testConfiguration()"]');
const originalContent = testBtn.innerHTML;
testBtn.innerHTML = '<i class="fas fa-spinner fa-spin me-1"></i>Testing...';
testBtn.disabled = true;
fetch('/maintenance/config/{data.TaskType}/test', {
method: 'POST',
body: formData
})
.then(response => response.json())
.then(data => {
if (data.success) {
alert('Configuration test successful!');
} else {
alert('Configuration test failed: ' + data.error);
}
})
.catch(error => {
alert('Test failed: ' + error);
})
.finally(() => {
testBtn.innerHTML = originalContent;
testBtn.disabled = false;
});
}
</script>
}

View File

@@ -1,112 +0,0 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
package app
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
)
// TaskConfigTemplData represents data for templ-based task configuration
type TaskConfigTemplData struct {
TaskType maintenance.MaintenanceTaskType
TaskName string
TaskIcon string
Description string
ConfigSections []components.ConfigSectionData
}
func TaskConfigTempl(data *TaskConfigTemplData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<div class=\"container-fluid\"><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"d-flex justify-content-between align-items-center\"><h2 class=\"mb-0\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 = []any{data.TaskIcon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var2).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_templ.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_templ.templ`, Line: 24, Col: 38}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration</h2><div class=\"btn-group\"><a href=\"/maintenance/config\" class=\"btn btn-outline-secondary\"><i class=\"fas fa-arrow-left me-1\"></i> Back to Configuration</a> <a href=\"/maintenance/queue\" class=\"btn btn-outline-info\"><i class=\"fas fa-list me-1\"></i> View Queue</a></div></div></div></div><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"alert alert-info\" role=\"alert\"><i class=\"fas fa-info-circle me-2\"></i> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/app/task_config_templ.templ`, Line: 44, Col: 37}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</div></div></div><form method=\"POST\" class=\"needs-validation\" novalidate><!-- Render all configuration sections -->")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, section := range data.ConfigSections {
templ_7745c5c3_Err = components.ConfigSection(section).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<!-- Form actions --><div class=\"row\"><div class=\"col-12\"><div class=\"card\"><div class=\"card-body\"><div class=\"d-flex justify-content-between\"><div><button type=\"submit\" class=\"btn btn-primary\"><i class=\"fas fa-save me-1\"></i> Save Configuration</button> <button type=\"button\" class=\"btn btn-outline-secondary ms-2\" onclick=\"resetForm()\"><i class=\"fas fa-undo me-1\"></i> Reset</button></div><div><button type=\"button\" class=\"btn btn-outline-info\" onclick=\"testConfiguration()\"><i class=\"fas fa-play me-1\"></i> Test Configuration</button></div></div></div></div></div></div></form></div><script>\n // Form validation\n (function() {\n 'use strict';\n window.addEventListener('load', function() {\n var forms = document.getElementsByClassName('needs-validation');\n var validation = Array.prototype.filter.call(forms, function(form) {\n form.addEventListener('submit', function(event) {\n if (form.checkValidity() === false) {\n event.preventDefault();\n event.stopPropagation();\n }\n form.classList.add('was-validated');\n }, false);\n });\n }, false);\n })();\n\n // Auto-save functionality\n let autoSaveTimeout;\n function autoSave() {\n clearTimeout(autoSaveTimeout);\n autoSaveTimeout = setTimeout(function() {\n const formData = new FormData(document.querySelector('form'));\n localStorage.setItem('task_config_' + '{data.TaskType}', JSON.stringify(Object.fromEntries(formData)));\n }, 1000);\n }\n\n // Add auto-save listeners to all form inputs\n document.addEventListener('DOMContentLoaded', function() {\n const form = document.querySelector('form');\n if (form) {\n form.addEventListener('input', autoSave);\n form.addEventListener('change', autoSave);\n }\n });\n\n // Reset form function\n function resetForm() {\n showConfirm('Are you sure you want to reset all changes?', function() {\n location.reload();\n });\n }\n\n // Test configuration function\n function 
testConfiguration() {\n const formData = new FormData(document.querySelector('form'));\n \n // Show loading state\n const testBtn = document.querySelector('button[onclick=\"testConfiguration()\"]');\n const originalContent = testBtn.innerHTML;\n testBtn.innerHTML = '<i class=\"fas fa-spinner fa-spin me-1\"></i>Testing...';\n testBtn.disabled = true;\n \n fetch('/maintenance/config/{data.TaskType}/test', {\n method: 'POST',\n body: formData\n })\n .then(response => response.json())\n .then(data => {\n if (data.success) {\n alert('Configuration test successful!');\n } else {\n alert('Configuration test failed: ' + data.error);\n }\n })\n .catch(error => {\n alert('Test failed: ' + error);\n })\n .finally(() => {\n testBtn.innerHTML = originalContent;\n testBtn.disabled = false;\n });\n }\n </script>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
var _ = templruntime.GeneratedTemplate

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -16,13 +16,14 @@ templ Layout(c *gin.Context, content templ.Component) {
} }
csrfToken := c.GetString("csrf_token") csrfToken := c.GetString("csrf_token")
// Detect if we're on a configuration page to keep submenu expanded
currentPath := c.Request.URL.Path currentPath := c.Request.URL.Path
isConfigPage := strings.HasPrefix(currentPath, "/maintenance/config") || currentPath == "/config"
// Detect if we're on a message queue page to keep submenu expanded // Detect if we're on a message queue page to keep submenu expanded
isMQPage := strings.HasPrefix(currentPath, "/mq/") isMQPage := strings.HasPrefix(currentPath, "/mq/")
// Detect if we're on plugin page.
isPluginPage := strings.HasPrefix(currentPath, "/plugin")
// Detect if we're on a storage page to keep submenu expanded // Detect if we're on a storage page to keep submenu expanded
isStoragePage := strings.HasPrefix(currentPath, "/storage/volumes") || strings.HasPrefix(currentPath, "/storage/ec-shards") || strings.HasPrefix(currentPath, "/storage/collections") isStoragePage := strings.HasPrefix(currentPath, "/storage/volumes") || strings.HasPrefix(currentPath, "/storage/ec-shards") || strings.HasPrefix(currentPath, "/storage/collections")
@@ -258,75 +259,61 @@ templ Layout(c *gin.Context, content templ.Component) {
</ul> </ul>
<h6 class="sidebar-heading px-3 mt-4 mb-1 text-muted"> <h6 class="sidebar-heading px-3 mt-4 mb-1 text-muted">
<span>MAINTENANCE</span> <span>WORKERS</span>
</h6> </h6>
<ul class="nav flex-column"> <ul class="nav flex-column">
<li class="nav-item"> <li class="nav-item">
if isConfigPage { if isPluginPage {
<a class="nav-link" href="#" data-bs-toggle="collapse" data-bs-target="#configurationSubmenu" aria-expanded="true" aria-controls="configurationSubmenu"> <a class="nav-link active" href="/plugin">
<i class="fas fa-cogs me-2"></i>Configuration <i class="fas fa-plug me-2"></i>Workers
<i class="fas fa-chevron-down ms-auto"></i>
</a> </a>
} else { } else {
<a class="nav-link collapsed" href="#" data-bs-toggle="collapse" data-bs-target="#configurationSubmenu" aria-expanded="false" aria-controls="configurationSubmenu"> <a class="nav-link" href="/plugin">
<i class="fas fa-cogs me-2"></i>Configuration <i class="fas fa-plug me-2"></i>Workers
<i class="fas fa-chevron-right ms-auto"></i>
</a> </a>
} }
if isConfigPage {
<div class="collapse show" id="configurationSubmenu">
<ul class="nav flex-column ms-3">
for _, menuItem := range GetConfigurationMenuItems() {
{{
isActiveItem := currentPath == menuItem.URL
}}
<li class="nav-item">
if isActiveItem {
<a class="nav-link py-2 active" href={templ.SafeURL(menuItem.URL)}>
<i class={menuItem.Icon + " me-2"}></i>{menuItem.Name}
</a>
} else {
<a class="nav-link py-2" href={templ.SafeURL(menuItem.URL)}>
<i class={menuItem.Icon + " me-2"}></i>{menuItem.Name}
</a>
}
</li>
}
</ul>
</div>
} else {
<div class="collapse" id="configurationSubmenu">
<ul class="nav flex-column ms-3">
for _, menuItem := range GetConfigurationMenuItems() {
<li class="nav-item">
<a class="nav-link py-2" href={templ.SafeURL(menuItem.URL)}>
<i class={menuItem.Icon + " me-2"}></i>{menuItem.Name}
</a>
</li>
}
</ul>
</div>
}
</li> </li>
<li class="nav-item"> <li class="nav-item">
if currentPath == "/maintenance" { if currentPath == "/plugin/detection" {
<a class="nav-link active" href="/maintenance"> <a class="nav-link active" href="/plugin/detection">
<i class="fas fa-search me-2"></i>Job Detection
</a>
} else {
<a class="nav-link" href="/plugin/detection">
<i class="fas fa-search me-2"></i>Job Detection
</a>
}
</li>
<li class="nav-item">
if currentPath == "/plugin/queue" {
<a class="nav-link active" href="/plugin/queue">
<i class="fas fa-list me-2"></i>Job Queue <i class="fas fa-list me-2"></i>Job Queue
</a> </a>
} else { } else {
<a class="nav-link" href="/maintenance"> <a class="nav-link" href="/plugin/queue">
<i class="fas fa-list me-2"></i>Job Queue <i class="fas fa-list me-2"></i>Job Queue
</a> </a>
} }
</li> </li>
<li class="nav-item"> <li class="nav-item">
if currentPath == "/maintenance/workers" { if currentPath == "/plugin/execution" {
<a class="nav-link active" href="/maintenance/workers"> <a class="nav-link active" href="/plugin/execution">
<i class="fas fa-user-cog me-2"></i>Workers <i class="fas fa-tasks me-2"></i>Job Execution
</a> </a>
} else { } else {
<a class="nav-link" href="/maintenance/workers"> <a class="nav-link" href="/plugin/execution">
<i class="fas fa-user-cog me-2"></i>Workers <i class="fas fa-tasks me-2"></i>Job Execution
</a>
}
</li>
<li class="nav-item">
if currentPath == "/plugin/configuration" {
<a class="nav-link active" href="/plugin/configuration">
<i class="fas fa-sliders-h me-2"></i>Configuration
</a>
} else {
<a class="nav-link" href="/plugin/configuration">
<i class="fas fa-sliders-h me-2"></i>Configuration
</a> </a>
} }
</li> </li>

View File

@@ -43,13 +43,14 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
} }
csrfToken := c.GetString("csrf_token") csrfToken := c.GetString("csrf_token")
// Detect if we're on a configuration page to keep submenu expanded
currentPath := c.Request.URL.Path currentPath := c.Request.URL.Path
isConfigPage := strings.HasPrefix(currentPath, "/maintenance/config") || currentPath == "/config"
// Detect if we're on a message queue page to keep submenu expanded // Detect if we're on a message queue page to keep submenu expanded
isMQPage := strings.HasPrefix(currentPath, "/mq/") isMQPage := strings.HasPrefix(currentPath, "/mq/")
// Detect if we're on plugin page.
isPluginPage := strings.HasPrefix(currentPath, "/plugin")
// Detect if we're on a storage page to keep submenu expanded // Detect if we're on a storage page to keep submenu expanded
isStoragePage := strings.HasPrefix(currentPath, "/storage/volumes") || strings.HasPrefix(currentPath, "/storage/ec-shards") || strings.HasPrefix(currentPath, "/storage/collections") isStoragePage := strings.HasPrefix(currentPath, "/storage/volumes") || strings.HasPrefix(currentPath, "/storage/ec-shards") || strings.HasPrefix(currentPath, "/storage/collections")
@@ -62,7 +63,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var2 string var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(csrfToken) templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(csrfToken)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 38, Col: 47} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 39, Col: 47}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -75,7 +76,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var3 string var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(username) templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(username)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 69, Col: 73} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 70, Col: 73}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -110,7 +111,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var6 string var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%t", isClusterPage)) templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%t", isClusterPage))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 96, Col: 207} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 97, Col: 207}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -167,7 +168,7 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
var templ_7745c5c3_Var11 string var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%t", isStoragePage)) templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%t", isStoragePage))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 121, Col: 207} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 122, Col: 207}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
@@ -251,243 +252,82 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</li><!-- Commented out for later --><!--\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"/metrics\">\n <i class=\"fas fa-chart-line me-2\"></i>Metrics\n </a>\n </li>\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"/logs\">\n <i class=\"fas fa-file-alt me-2\"></i>Logs\n </a>\n </li>\n --></ul><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>MAINTENANCE</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</li><!-- Commented out for later --><!--\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"/metrics\">\n <i class=\"fas fa-chart-line me-2\"></i>Metrics\n </a>\n </li>\n <li class=\"nav-item\">\n <a class=\"nav-link\" href=\"/logs\">\n <i class=\"fas fa-file-alt me-2\"></i>Logs\n </a>\n </li>\n --></ul><h6 class=\"sidebar-heading px-3 mt-4 mb-1 text-muted\"><span>WORKERS</span></h6><ul class=\"nav flex-column\"><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if isConfigPage { if isPluginPage {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "<a class=\"nav-link\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#configurationSubmenu\" aria-expanded=\"true\" aria-controls=\"configurationSubmenu\"><i class=\"fas fa-cogs me-2\"></i>Configuration <i class=\"fas fa-chevron-down ms-auto\"></i></a> ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "<a class=\"nav-link active\" href=\"/plugin\"><i class=\"fas fa-plug me-2\"></i>Workers</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<a class=\"nav-link collapsed\" href=\"#\" data-bs-toggle=\"collapse\" data-bs-target=\"#configurationSubmenu\" aria-expanded=\"false\" aria-controls=\"configurationSubmenu\"><i class=\"fas fa-cogs me-2\"></i>Configuration <i class=\"fas fa-chevron-right ms-auto\"></i></a> ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<a class=\"nav-link\" href=\"/plugin\"><i class=\"fas fa-plug me-2\"></i>Workers</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
if isConfigPage { templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</li><li class=\"nav-item\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<div class=\"collapse show\" id=\"configurationSubmenu\"><ul class=\"nav flex-column ms-3\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, menuItem := range GetConfigurationMenuItems() {
isActiveItem := currentPath == menuItem.URL
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<li class=\"nav-item\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if isActiveItem {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<a class=\"nav-link py-2 active\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 templ.SafeURL
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.URL))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 285, Col: 117}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 = []any{menuItem.Icon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var15...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var15).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 286, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<a class=\"nav-link py-2\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 templ.SafeURL
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.URL))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 289, Col: 110}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 = []any{menuItem.Icon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var19...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var19).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 290, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "</a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</ul></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "<div class=\"collapse\" id=\"configurationSubmenu\"><ul class=\"nav flex-column ms-3\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, menuItem := range GetConfigurationMenuItems() {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<li class=\"nav-item\"><a class=\"nav-link py-2\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 templ.SafeURL
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(menuItem.URL))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 302, Col: 106}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 = []any{menuItem.Icon + " me-2"}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var23...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<i class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var23).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "\"></i>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 303, Col: 105}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "</a></li>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</ul></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</li><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if currentPath == "/maintenance" { if currentPath == "/plugin/detection" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<a class=\"nav-link active\" href=\"/maintenance\"><i class=\"fas fa-list me-2\"></i>Job Queue</a>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<a class=\"nav-link active\" href=\"/plugin/detection\"><i class=\"fas fa-search me-2\"></i>Job Detection</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<a class=\"nav-link\" href=\"/maintenance\"><i class=\"fas fa-list me-2\"></i>Job Queue</a>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<a class=\"nav-link\" href=\"/plugin/detection\"><i class=\"fas fa-search me-2\"></i>Job Detection</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</li><li class=\"nav-item\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</li><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if currentPath == "/maintenance/workers" { if currentPath == "/plugin/queue" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "<a class=\"nav-link active\" href=\"/maintenance/workers\"><i class=\"fas fa-user-cog me-2\"></i>Workers</a>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<a class=\"nav-link active\" href=\"/plugin/queue\"><i class=\"fas fa-list me-2\"></i>Job Queue</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} else { } else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "<a class=\"nav-link\" href=\"/maintenance/workers\"><i class=\"fas fa-user-cog me-2\"></i>Workers</a>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<a class=\"nav-link\" href=\"/plugin/queue\"><i class=\"fas fa-list me-2\"></i>Job Queue</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</li></ul></div></div><!-- Main content --><main class=\"col-md-9 ms-sm-auto col-lg-10 px-md-4\"><div class=\"pt-3\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</li><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if currentPath == "/plugin/execution" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<a class=\"nav-link active\" href=\"/plugin/execution\"><i class=\"fas fa-tasks me-2\"></i>Job Execution</a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<a class=\"nav-link\" href=\"/plugin/execution\"><i class=\"fas fa-tasks me-2\"></i>Job Execution</a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "</li><li class=\"nav-item\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if currentPath == "/plugin/configuration" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<a class=\"nav-link active\" href=\"/plugin/configuration\"><i class=\"fas fa-sliders-h me-2\"></i>Configuration</a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "<a class=\"nav-link\" href=\"/plugin/configuration\"><i class=\"fas fa-sliders-h me-2\"></i>Configuration</a>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</li></ul></div></div><!-- Main content --><main class=\"col-md-9 ms-sm-auto col-lg-10 px-md-4\"><div class=\"pt-3\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
@@ -495,43 +335,43 @@ func Layout(c *gin.Context, content templ.Component) templ.Component {
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</div></main></div></div><!-- Footer --><footer class=\"footer mt-auto py-3 bg-light\"><div class=\"container-fluid text-center\"><small class=\"text-muted\">&copy; ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</div></main></div></div><!-- Footer --><footer class=\"footer mt-auto py-3 bg-light\"><div class=\"container-fluid text-center\"><small class=\"text-muted\">&copy; ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var26 string var templ_7745c5c3_Var14 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", time.Now().Year())) templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", time.Now().Year()))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 350, Col: 60} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 337, Col: 60}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, " SeaweedFS Admin v") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, " SeaweedFS Admin v")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var27 string var templ_7745c5c3_Var15 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(version.VERSION_NUMBER) templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(version.VERSION_NUMBER)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 350, Col: 102} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 337, Col: 102}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, " ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, " ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if !strings.Contains(version.VERSION, "enterprise") { if !strings.Contains(version.VERSION, "enterprise") {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<span class=\"mx-2\">•</span> <a href=\"https://seaweedfs.com\" target=\"_blank\" class=\"text-decoration-none\"><i class=\"fas fa-star me-1\"></i>Enterprise Version Available</a>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "<span class=\"mx-2\">•</span> <a href=\"https://seaweedfs.com\" target=\"_blank\" class=\"text-decoration-none\"><i class=\"fas fa-star me-1\"></i>Enterprise Version Available</a>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</small></div></footer><!-- Bootstrap JS --><script src=\"/static/js/bootstrap.bundle.min.js\"></script><!-- Modal Alerts JS (replaces native alert/confirm) --><script src=\"/static/js/modal-alerts.js\"></script><!-- Custom JS --><script src=\"/static/js/admin.js\"></script><script src=\"/static/js/iam-utils.js\"></script><script src=\"/static/js/s3tables.js\"></script></body></html>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</small></div></footer><!-- Bootstrap JS --><script src=\"/static/js/bootstrap.bundle.min.js\"></script><!-- Modal Alerts JS (replaces native alert/confirm) --><script src=\"/static/js/modal-alerts.js\"></script><!-- Custom JS --><script src=\"/static/js/admin.js\"></script><script src=\"/static/js/iam-utils.js\"></script><script src=\"/static/js/s3tables.js\"></script></body></html>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
@@ -555,61 +395,61 @@ func LoginForm(c *gin.Context, title string, errorMessage string) templ.Componen
}() }()
} }
ctx = templ.InitializeContext(ctx) ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var28 := templ.GetChildren(ctx) templ_7745c5c3_Var16 := templ.GetChildren(ctx)
if templ_7745c5c3_Var28 == nil { if templ_7745c5c3_Var16 == nil {
templ_7745c5c3_Var28 = templ.NopComponent templ_7745c5c3_Var16 = templ.NopComponent
} }
ctx = templ.ClearChildren(ctx) ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "<!doctype html><html lang=\"en\"><head><meta charset=\"UTF-8\"><title>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "<!doctype html><html lang=\"en\"><head><meta charset=\"UTF-8\"><title>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var29 string var templ_7745c5c3_Var17 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(title) templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(title)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 378, Col: 17} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 365, Col: 17}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, " - Login</title><link rel=\"icon\" href=\"/static/favicon.ico\" type=\"image/x-icon\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"><link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\"><link href=\"/static/css/fontawesome.min.css\" rel=\"stylesheet\"></head><body class=\"bg-light\"><div class=\"container\"><div class=\"row justify-content-center min-vh-100 align-items-center\"><div class=\"col-md-6 col-lg-4\"><div class=\"card shadow\"><div class=\"card-body p-5\"><div class=\"text-center mb-4\"><i class=\"fas fa-server fa-3x text-primary mb-3\"></i><h4 class=\"card-title\">") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, " - Login</title><link rel=\"icon\" href=\"/static/favicon.ico\" type=\"image/x-icon\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1\"><link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\"><link href=\"/static/css/fontawesome.min.css\" rel=\"stylesheet\"></head><body class=\"bg-light\"><div class=\"container\"><div class=\"row justify-content-center min-vh-100 align-items-center\"><div class=\"col-md-6 col-lg-4\"><div class=\"card shadow\"><div class=\"card-body p-5\"><div class=\"text-center mb-4\"><i class=\"fas fa-server fa-3x text-primary mb-3\"></i><h4 class=\"card-title\">")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var30 string var templ_7745c5c3_Var18 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(title) templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(title)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 392, Col: 57} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 379, Col: 57}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "</h4><p class=\"text-muted\">Please sign in to continue</p></div>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</h4><p class=\"text-muted\">Please sign in to continue</p></div>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
if errorMessage != "" { if errorMessage != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "<div class=\"alert alert-danger\" role=\"alert\"><i class=\"fas fa-exclamation-triangle me-2\"></i> ") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "<div class=\"alert alert-danger\" role=\"alert\"><i class=\"fas fa-exclamation-triangle me-2\"></i> ")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
var templ_7745c5c3_Var31 string var templ_7745c5c3_Var19 string
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(errorMessage) templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(errorMessage)
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 399, Col: 45} return templ.Error{Err: templ_7745c5c3_Err, FileName: `weed/admin/view/layout/layout.templ`, Line: 386, Col: 45}
} }
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31)) _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "</div>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</div>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }
} }
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "<form method=\"POST\" action=\"/login\"><div class=\"mb-3\"><label for=\"username\" class=\"form-label\">Username</label><div class=\"input-group\"><span class=\"input-group-text\"><i class=\"fas fa-user\"></i></span> <input type=\"text\" class=\"form-control\" id=\"username\" name=\"username\" required></div></div><div class=\"mb-4\"><label for=\"password\" class=\"form-label\">Password</label><div class=\"input-group\"><span class=\"input-group-text\"><i class=\"fas fa-lock\"></i></span> <input type=\"password\" class=\"form-control\" id=\"password\" name=\"password\" required></div></div><button type=\"submit\" class=\"btn btn-primary w-100\"><i class=\"fas fa-sign-in-alt me-2\"></i>Sign In</button></form></div></div></div></div></div><script src=\"/static/js/bootstrap.bundle.min.js\"></script></body></html>") templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "<form method=\"POST\" action=\"/login\"><div class=\"mb-3\"><label for=\"username\" class=\"form-label\">Username</label><div class=\"input-group\"><span class=\"input-group-text\"><i class=\"fas fa-user\"></i></span> <input type=\"text\" class=\"form-control\" id=\"username\" name=\"username\" required></div></div><div class=\"mb-4\"><label for=\"password\" class=\"form-label\">Password</label><div class=\"input-group\"><span class=\"input-group-text\"><i class=\"fas fa-lock\"></i></span> <input type=\"password\" class=\"form-control\" id=\"password\" name=\"password\" required></div></div><button type=\"submit\" class=\"btn btn-primary w-100\"><i class=\"fas fa-sign-in-alt me-2\"></i>Sign In</button></form></div></div></div></div></div><script src=\"/static/js/bootstrap.bundle.min.js\"></script></body></html>")
if templ_7745c5c3_Err != nil { if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err return templ_7745c5c3_Err
} }

View File

@@ -1,47 +0,0 @@
package layout
import (
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
// Import task packages to trigger their auto-registration
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
)
// MenuItemData represents a menu item
type MenuItemData struct {
Name string
URL string
Icon string
Description string
}
// GetConfigurationMenuItems returns the dynamic configuration menu items
func GetConfigurationMenuItems() []*MenuItemData {
var menuItems []*MenuItemData
// Add system configuration item
menuItems = append(menuItems, &MenuItemData{
Name: "System",
URL: "/maintenance/config",
Icon: "fas fa-cogs",
Description: "System-level configuration",
})
// Get all registered task types and add them as submenu items
registeredTypes := maintenance.GetRegisteredMaintenanceTaskTypes()
for _, taskType := range registeredTypes {
menuItem := &MenuItemData{
Name: maintenance.GetTaskDisplayName(taskType),
URL: "/maintenance/config/" + string(taskType),
Icon: maintenance.GetTaskIcon(taskType),
Description: maintenance.GetTaskDescription(taskType),
}
menuItems = append(menuItems, menuItem)
}
return menuItems
}

View File

@@ -117,6 +117,12 @@ var cmdAdmin = &Command{
- TLS is automatically used if certificates are configured - TLS is automatically used if certificates are configured
- Workers fall back to insecure connections if TLS is unavailable - Workers fall back to insecure connections if TLS is unavailable
Plugin:
- Always enabled on the worker gRPC port
- Registers plugin.proto gRPC service on the same worker gRPC port
- External workers connect with: weed worker -admin=<admin_host:admin_port>
- Persists plugin metadata under dataDir/plugin when dataDir is configured
Configuration File: Configuration File:
- The security.toml file is read from ".", "$HOME/.seaweedfs/", - The security.toml file is read from ".", "$HOME/.seaweedfs/",
"/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order
@@ -197,6 +203,7 @@ func runAdmin(cmd *Command, args []string) bool {
} else { } else {
fmt.Printf("Authentication: Disabled\n") fmt.Printf("Authentication: Disabled\n")
} }
fmt.Printf("Plugin: Enabled\n")
// Set up graceful shutdown // Set up graceful shutdown
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@@ -295,7 +302,7 @@ func startAdminServer(ctx context.Context, options AdminOptions, enableUI bool,
r.StaticFS("/static", http.FS(staticFS)) r.StaticFS("/static", http.FS(staticFS))
} }
// Create admin server // Create admin server (plugin is always enabled)
adminServer := dash.NewAdminServer(*options.master, nil, dataDir, icebergPort) adminServer := dash.NewAdminServer(*options.master, nil, dataDir, icebergPort)
// Show discovered filers // Show discovered filers

View File

@@ -0,0 +1,48 @@
package command
import (
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// parseCapabilities converts comma-separated legacy maintenance capabilities to task types.
// This remains for mini-mode maintenance worker wiring. Unknown entries are
// logged and skipped; an empty input yields nil.
func parseCapabilities(capabilityStr string) []types.TaskType {
	if capabilityStr == "" {
		return nil
	}
	// Build the lookup table from every registered detector's task type.
	byName := make(map[string]types.TaskType)
	for taskType := range tasks.GetGlobalTypesRegistry().GetAllDetectors() {
		byName[strings.ToLower(string(taskType))] = taskType
	}
	// Register short aliases for commonly used task types, when present.
	if taskType, ok := byName["erasure_coding"]; ok {
		byName["ec"] = taskType
	}
	if taskType, ok := byName["remote_upload"]; ok {
		byName["remote"] = taskType
	}
	if taskType, ok := byName["fix_replication"]; ok {
		byName["replication"] = taskType
	}
	var result []types.TaskType
	for _, token := range strings.Split(capabilityStr, ",") {
		token = strings.TrimSpace(token)
		if taskType, ok := byName[token]; ok {
			result = append(result, taskType)
		} else {
			glog.Warningf("Unknown capability: %s", token)
		}
	}
	return result
}

View File

@@ -13,11 +13,13 @@ import (
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb"
pluginworker "github.com/seaweedfs/seaweedfs/weed/plugin/worker"
"github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/security"
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
"github.com/seaweedfs/seaweedfs/weed/util/grace" "github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/seaweedfs/seaweedfs/weed/worker" "github.com/seaweedfs/seaweedfs/weed/worker"
"github.com/seaweedfs/seaweedfs/weed/worker/types" "github.com/seaweedfs/seaweedfs/weed/worker/types"
@@ -43,6 +45,7 @@ const (
defaultMiniVolumeSizeMB = 128 // Default volume size for mini mode defaultMiniVolumeSizeMB = 128 // Default volume size for mini mode
maxVolumeSizeMB = 1024 // Maximum volume size in MB (1GB) maxVolumeSizeMB = 1024 // Maximum volume size in MB (1GB)
GrpcPortOffset = 10000 // Offset used to calculate gRPC port from HTTP port GrpcPortOffset = 10000 // Offset used to calculate gRPC port from HTTP port
defaultMiniPluginJobTypes = "vacuum,volume_balance,erasure_coding"
) )
var ( var (
@@ -1028,6 +1031,7 @@ func startMiniAdminWithWorker(allServicesReady chan struct{}) {
// Start worker after admin server is ready // Start worker after admin server is ready
startMiniWorker() startMiniWorker()
startMiniPluginWorker(ctx)
// Wait for worker to be ready by polling its gRPC port // Wait for worker to be ready by polling its gRPC port
workerGrpcAddr := fmt.Sprintf("%s:%d", bindIp, *miniAdminOptions.grpcPort) workerGrpcAddr := fmt.Sprintf("%s:%d", bindIp, *miniAdminOptions.grpcPort)
@@ -1165,6 +1169,62 @@ func startMiniWorker() {
glog.Infof("Maintenance worker %s started successfully", workerInstance.ID()) glog.Infof("Maintenance worker %s started successfully", workerInstance.ID())
} }
// startMiniPluginWorker launches the embedded plugin worker that accompanies
// the mini admin server. It derives the admin address from the mini options,
// serves the default mini job types (vacuum, volume_balance, erasure_coding),
// and persists its worker identity under <miniDataFolders>/plugin_worker so
// the ID is stable across restarts.
//
// Setup failures are fatal (mini mode cannot run without its embedded
// worker); errors after startup are only logged.
func startMiniPluginWorker(ctx context.Context) {
	glog.Infof("Starting plugin worker for admin server")
	adminAddr := fmt.Sprintf("%s:%d", *miniIp, *miniAdminOptions.port)
	// Probe the admin status endpoint in case the worker gRPC port deviates
	// from the default HTTP+10000 convention.
	resolvedAdminAddr := resolvePluginWorkerAdminServer(adminAddr)
	if resolvedAdminAddr != adminAddr {
		glog.Infof("Resolved mini plugin worker admin endpoint: %s -> %s", adminAddr, resolvedAdminAddr)
	}
	workerDir := filepath.Join(*miniDataFolders, "plugin_worker")
	if err := os.MkdirAll(workerDir, 0755); err != nil {
		glog.Fatalf("Failed to create plugin worker directory: %v", err)
	}
	util.LoadConfiguration("security", false)
	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
	handlers, err := buildPluginWorkerHandlers(defaultMiniPluginJobTypes, grpcDialOption)
	if err != nil {
		glog.Fatalf("Failed to build mini plugin worker handlers: %v", err)
	}
	// Empty explicit ID: reuse (or generate and persist) plugin.worker.id in workerDir.
	workerID, err := resolvePluginWorkerID("", workerDir)
	if err != nil {
		glog.Fatalf("Failed to resolve mini plugin worker ID: %v", err)
	}
	pluginRuntime, err := pluginworker.NewWorker(pluginworker.WorkerOptions{
		AdminServer:             resolvedAdminAddr,
		WorkerID:                workerID,
		WorkerVersion:           version.Version(),
		WorkerAddress:           *miniIp,
		HeartbeatInterval:       15 * time.Second,
		ReconnectDelay:          5 * time.Second,
		MaxDetectionConcurrency: 1,
		MaxExecutionConcurrency: 2,
		GrpcDialOption:          grpcDialOption,
		Handlers:                handlers,
	})
	if err != nil {
		glog.Fatalf("Failed to create mini plugin worker: %v", err)
	}
	go func() {
		// Guard against a nil context from the caller; fall back to Background.
		runCtx := ctx
		if runCtx == nil {
			runCtx = context.Background()
		}
		// Only report errors that are not caused by context cancellation.
		if runErr := pluginRuntime.Run(runCtx); runErr != nil && runCtx.Err() == nil {
			glog.Errorf("Mini plugin worker stopped with error: %v", runErr)
		}
	}()
	glog.Infof("Plugin worker %s started successfully with job types: %s", workerID, defaultMiniPluginJobTypes)
}
const credentialsInstructionTemplate = ` const credentialsInstructionTemplate = `
To create S3 credentials, you have two options: To create S3 credentials, you have two options:

View File

@@ -0,0 +1,13 @@
package command
import "testing"
// TestMiniDefaultPluginJobTypes verifies the mini-mode default job type list
// parses cleanly into exactly three handlers.
func TestMiniDefaultPluginJobTypes(t *testing.T) {
	parsed, err := parsePluginWorkerJobTypes(defaultMiniPluginJobTypes)
	if err != nil {
		t.Fatalf("parsePluginWorkerJobTypes(mini default) err = %v", err)
	}
	if count := len(parsed); count != 3 {
		t.Fatalf("expected mini default job types to include 3 handlers, got %v", parsed)
	}
}

View File

@@ -0,0 +1,238 @@
package command
import (
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// TestBuildPluginWorkerHandler verifies that every supported job type —
// including aliases and the empty default — yields a non-nil handler, and
// that an unknown job type is rejected with an error.
func TestBuildPluginWorkerHandler(t *testing.T) {
	dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
	// Table-driven: each supported spelling must produce a non-nil handler.
	supported := []string{
		"vacuum",
		"", // empty defaults to vacuum
		"volume_balance",
		"balance", // alias for volume_balance
		"erasure_coding",
		"ec", // alias for erasure_coding
	}
	for _, jobType := range supported {
		handler, err := buildPluginWorkerHandler(jobType, dialOption)
		if err != nil {
			t.Fatalf("buildPluginWorkerHandler(%q) err = %v", jobType, err)
		}
		if handler == nil {
			t.Fatalf("buildPluginWorkerHandler(%q) returned nil handler", jobType)
		}
	}
	if _, err := buildPluginWorkerHandler("unknown", dialOption); err == nil {
		t.Fatalf("expected unsupported job type error")
	}
}
// TestBuildPluginWorkerHandlers verifies list parsing, alias resolution and
// deduplication when building multiple handlers, plus error propagation for
// unknown job types.
func TestBuildPluginWorkerHandlers(t *testing.T) {
	dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
	cases := []struct {
		label    string
		jobTypes string
		want     int
	}{
		{"explicit list", "vacuum,volume_balance,erasure_coding", 3},
		{"aliases deduped", "balance,ec,vacuum,balance", 3},
	}
	for _, tc := range cases {
		handlers, err := buildPluginWorkerHandlers(tc.jobTypes, dialOption)
		if err != nil {
			t.Fatalf("buildPluginWorkerHandlers(%s) err = %v", tc.label, err)
		}
		if len(handlers) != tc.want {
			t.Fatalf("%s: expected %d handlers, got %d", tc.label, tc.want, len(handlers))
		}
	}
	if _, err := buildPluginWorkerHandlers("unknown,vacuum", dialOption); err == nil {
		t.Fatalf("expected unsupported job type error")
	}
}
// TestParsePluginWorkerJobTypes covers the default fallback, whitespace
// trimming, alias canonicalization, deduplication, and order preservation.
func TestParsePluginWorkerJobTypes(t *testing.T) {
	jobTypes, err := parsePluginWorkerJobTypes("")
	if err != nil {
		t.Fatalf("parsePluginWorkerJobTypes(default) err = %v", err)
	}
	if len(jobTypes) != 1 || jobTypes[0] != "vacuum" {
		t.Fatalf("expected default [vacuum], got %v", jobTypes)
	}
	jobTypes, err = parsePluginWorkerJobTypes(" volume_balance , ec , vacuum , volume_balance ")
	if err != nil {
		t.Fatalf("parsePluginWorkerJobTypes(list) err = %v", err)
	}
	if len(jobTypes) != 3 {
		t.Fatalf("expected 3 deduped job types, got %d (%v)", len(jobTypes), jobTypes)
	}
	if jobTypes[0] != "volume_balance" || jobTypes[1] != "erasure_coding" || jobTypes[2] != "vacuum" {
		t.Fatalf("unexpected parsed order %v", jobTypes)
	}
	// A list of only separators/whitespace must also fall back to the default.
	// The original test only checked err == nil here, which would have let a
	// regression returning an empty slice pass; assert the result explicitly.
	jobTypes, err = parsePluginWorkerJobTypes(" , ")
	if err != nil {
		t.Fatalf("parsePluginWorkerJobTypes(blank list) err = %v", err)
	}
	if len(jobTypes) != 1 || jobTypes[0] != "vacuum" {
		t.Fatalf("expected blank list to resolve to default [vacuum], got %v", jobTypes)
	}
}
// TestPluginWorkerDefaultJobTypes ensures the standalone worker's default job
// type setting resolves to all three supported handlers.
func TestPluginWorkerDefaultJobTypes(t *testing.T) {
	parsed, err := parsePluginWorkerJobTypes(defaultPluginWorkerJobTypes)
	if err != nil {
		t.Fatalf("parsePluginWorkerJobTypes(default setting) err = %v", err)
	}
	if count := len(parsed); count != 3 {
		t.Fatalf("expected default job types to include 3 handlers, got %v", parsed)
	}
}
// TestResolvePluginWorkerID checks that an explicit ID wins, that a generated
// ID carries the "plugin-" prefix and is persisted to plugin.worker.id, and
// that the persisted ID is reused on subsequent calls.
func TestResolvePluginWorkerID(t *testing.T) {
	dir := t.TempDir()
	explicit, err := resolvePluginWorkerID("worker-x", dir)
	if err != nil {
		t.Fatalf("resolvePluginWorkerID(explicit) err = %v", err)
	}
	if explicit != "worker-x" {
		t.Fatalf("expected explicit id, got %q", explicit)
	}
	generated, err := resolvePluginWorkerID("", dir)
	if err != nil {
		t.Fatalf("resolvePluginWorkerID(generate) err = %v", err)
	}
	if generated == "" {
		t.Fatalf("expected generated id")
	}
	// Use strings.HasPrefix instead of manual slicing (generated[:7]) —
	// clearer and no risk of an out-of-range panic on short strings.
	if !strings.HasPrefix(generated, "plugin-") {
		t.Fatalf("expected generated id prefix plugin-, got %q", generated)
	}
	persistedPath := filepath.Join(dir, "plugin.worker.id")
	if _, statErr := os.Stat(persistedPath); statErr != nil {
		t.Fatalf("expected persisted worker id file: %v", statErr)
	}
	reused, err := resolvePluginWorkerID("", dir)
	if err != nil {
		t.Fatalf("resolvePluginWorkerID(reuse) err = %v", err)
	}
	if reused != generated {
		t.Fatalf("expected reused id %q, got %q", generated, reused)
	}
}
// TestParsePluginWorkerAdminAddress covers the plain host:port form, the
// dotted host:httpPort.grpcPort form, and a malformed address with no port.
func TestParsePluginWorkerAdminAddress(t *testing.T) {
	cases := []struct {
		address          string
		wantHost         string
		wantHTTPPort     int
		wantExplicitGrpc bool
	}{
		{"localhost:23646", "localhost", 23646, false},
		{"localhost:23646.33646", "localhost", 23646, true},
	}
	for _, tc := range cases {
		host, httpPort, hasExplicitGrpcPort, err := parsePluginWorkerAdminAddress(tc.address)
		if err != nil {
			t.Fatalf("parsePluginWorkerAdminAddress(%s) err = %v", tc.address, err)
		}
		if host != tc.wantHost || httpPort != tc.wantHTTPPort || hasExplicitGrpcPort != tc.wantExplicitGrpc {
			t.Fatalf("parsePluginWorkerAdminAddress(%s): host=%q httpPort=%d hasExplicit=%v", tc.address, host, httpPort, hasExplicitGrpcPort)
		}
	}
	if _, _, _, err := parsePluginWorkerAdminAddress("localhost"); err == nil {
		t.Fatalf("expected parse error for invalid address")
	}
}
// TestResolvePluginWorkerAdminServerUsesStatusGrpcPort verifies that a
// non-default worker gRPC port reported by /api/plugin/status is appended to
// the admin address using the dotted host:httpPort.grpcPort form.
func TestResolvePluginWorkerAdminServerUsesStatusGrpcPort(t *testing.T) {
	const reportedGrpcPort = 35432
	statusServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/api/plugin/status" {
			http.NotFound(w, r)
			return
		}
		fmt.Fprintf(w, `{"worker_grpc_port":%d}`, reportedGrpcPort)
	}))
	defer statusServer.Close()
	adminAddress := strings.TrimPrefix(statusServer.URL, "http://")
	host, httpPort, _, err := parsePluginWorkerAdminAddress(adminAddress)
	if err != nil {
		t.Fatalf("parsePluginWorkerAdminAddress(%s) err = %v", adminAddress, err)
	}
	want := fmt.Sprintf("%s:%d.%d", host, httpPort, reportedGrpcPort)
	if got := resolvePluginWorkerAdminServer(adminAddress); got != want {
		t.Fatalf("unexpected resolved admin address: got=%q want=%q", got, want)
	}
}
// TestResolvePluginWorkerAdminServerKeepsDefaultGrpcOffset verifies that when
// the status endpoint reports a gRPC port matching the default HTTP+10000
// convention, the admin address is kept in its canonical host:port form.
func TestResolvePluginWorkerAdminServerKeepsDefaultGrpcOffset(t *testing.T) {
	// The handler closes over statusServer to learn its own HTTP port, so the
	// variable must be declared before the server is constructed.
	var statusServer *httptest.Server
	statusServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/api/plugin/status" {
			http.NotFound(w, r)
			return
		}
		address := strings.TrimPrefix(statusServer.URL, "http://")
		_, httpPort, _, parseErr := parsePluginWorkerAdminAddress(address)
		if parseErr != nil {
			http.Error(w, parseErr.Error(), http.StatusInternalServerError)
			return
		}
		// Report exactly httpPort+10000 so the resolver keeps the address as-is.
		fmt.Fprintf(w, `{"worker_grpc_port":%d}`, httpPort+10000)
	}))
	defer statusServer.Close()
	adminAddress := strings.TrimPrefix(statusServer.URL, "http://")
	if got := resolvePluginWorkerAdminServer(adminAddress); got != adminAddress {
		t.Fatalf("expected admin address to remain unchanged, got=%q want=%q", got, adminAddress)
	}
}

View File

@@ -1,76 +1,54 @@
package command package command
import ( import (
"net/http"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time" "time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/security"
statsCollect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/grace" "github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/seaweedfs/seaweedfs/weed/worker"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
// Import task packages to trigger their auto-registration
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
_ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
// TODO: Implement additional task packages (add to default capabilities when ready):
// _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/remote" - for uploading volumes to remote/cloud storage
// _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/replication" - for fixing replication issues and maintaining data consistency
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
var cmdWorker = &Command{ var cmdWorker = &Command{
UsageLine: "worker -admin=<admin_server> [-capabilities=<task_types>] [-maxConcurrent=<num>] [-workingDir=<path>] [-metricsPort=<port>] [-debug]", UsageLine: "worker -admin=<admin_server> [-id=<worker_id>] [-jobType=vacuum,volume_balance,erasure_coding] [-workingDir=<path>] [-heartbeat=15s] [-reconnect=5s] [-maxDetect=1] [-maxExecute=4] [-metricsPort=<port>] [-metricsIp=<ip>] [-debug]",
Short: "start a maintenance worker to process cluster maintenance tasks", Short: "start a plugin.proto worker process",
Long: `Start a maintenance worker that connects to an admin server to process Long: `Start an external plugin worker using weed/pb/plugin.proto over gRPC.
maintenance tasks like vacuum, erasure coding, remote upload, and replication fixes.
The worker ID and address are automatically generated. This command provides vacuum, volume_balance, and erasure_coding job type
The worker connects to the admin server via gRPC (admin HTTP port + 10000). contracts with the plugin stream runtime, including descriptor delivery,
heartbeat/load reporting, detection, and execution.
Behavior:
- Use -jobType to choose one or more plugin job handlers (comma-separated list)
- Use -workingDir to persist plugin.worker.id for stable worker identity across restarts
- Use -metricsPort/-metricsIp to expose /health, /ready, and /metrics
Examples: Examples:
weed worker -admin=localhost:23646 weed worker -admin=localhost:23646
weed worker -admin=admin.example.com:23646 weed worker -admin=localhost:23646 -jobType=volume_balance
weed worker -admin=localhost:23646 -capabilities=vacuum,replication weed worker -admin=localhost:23646 -jobType=vacuum,volume_balance
weed worker -admin=localhost:23646 -maxConcurrent=4 weed worker -admin=localhost:23646 -jobType=erasure_coding
weed worker -admin=localhost:23646 -workingDir=/tmp/worker weed worker -admin=admin.example.com:23646 -id=plugin-vacuum-a -heartbeat=10s
weed worker -admin=localhost:23646 -metricsPort=9327 weed worker -admin=localhost:23646 -workingDir=/var/lib/seaweedfs-plugin
weed worker -admin=localhost:23646 -debug -debug.port=6060 weed worker -admin=localhost:23646 -metricsPort=9327 -metricsIp=0.0.0.0
`, `,
} }
var ( var (
workerAdminServer = cmdWorker.Flag.String("admin", "localhost:23646", "admin server address") workerAdminServer = cmdWorker.Flag.String("admin", "localhost:23646", "admin server address")
workerCapabilities = cmdWorker.Flag.String("capabilities", "vacuum,ec,balance", "comma-separated list of task types this worker can handle") workerID = cmdWorker.Flag.String("id", "", "worker ID (auto-generated when empty)")
workerMaxConcurrent = cmdWorker.Flag.Int("maxConcurrent", 2, "maximum number of concurrent tasks") workerWorkingDir = cmdWorker.Flag.String("workingDir", "", "working directory for persistent worker state")
workerHeartbeatInterval = cmdWorker.Flag.Duration("heartbeat", 30*time.Second, "heartbeat interval") workerJobType = cmdWorker.Flag.String("jobType", defaultPluginWorkerJobTypes, "job types to serve (comma-separated list)")
workerTaskRequestInterval = cmdWorker.Flag.Duration("taskInterval", 5*time.Second, "task request interval") workerHeartbeat = cmdWorker.Flag.Duration("heartbeat", 15*time.Second, "heartbeat interval")
workerWorkingDir = cmdWorker.Flag.String("workingDir", "", "working directory for the worker") workerReconnect = cmdWorker.Flag.Duration("reconnect", 5*time.Second, "reconnect delay")
workerMetricsPort = cmdWorker.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") workerMaxDetect = cmdWorker.Flag.Int("maxDetect", 1, "max concurrent detection requests")
workerMetricsIp = cmdWorker.Flag.String("metricsIp", "0.0.0.0", "Prometheus metrics listen IP") workerMaxExecute = cmdWorker.Flag.Int("maxExecute", 4, "max concurrent execute requests")
workerDebug = cmdWorker.Flag.Bool("debug", false, "serves runtime profiling data via pprof on the port specified by -debug.port") workerAddress = cmdWorker.Flag.String("address", "", "worker address advertised to admin")
workerDebugPort = cmdWorker.Flag.Int("debug.port", 6060, "http port for debugging") workerMetricsPort = cmdWorker.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
workerMetricsIp = cmdWorker.Flag.String("metricsIp", "0.0.0.0", "Prometheus metrics listen IP")
workerServerHeader = "SeaweedFS Worker " + version.VERSION workerDebug = cmdWorker.Flag.Bool("debug", false, "serves runtime profiling data via pprof on the port specified by -debug.port")
workerDebugPort = cmdWorker.Flag.Int("debug.port", 6060, "http port for debugging")
) )
func init() { func init() {
cmdWorker.Run = runWorker cmdWorker.Run = runWorker
// Set default capabilities from registered task types
// This happens after package imports have triggered auto-registration
tasks.SetDefaultCapabilitiesFromRegistry()
} }
func runWorker(cmd *Command, args []string) bool { func runWorker(cmd *Command, args []string) bool {
@@ -78,218 +56,17 @@ func runWorker(cmd *Command, args []string) bool {
grace.StartDebugServer(*workerDebugPort) grace.StartDebugServer(*workerDebugPort)
} }
util.LoadConfiguration("security", false) return runPluginWorkerWithOptions(pluginWorkerRunOptions{
AdminServer: *workerAdminServer,
glog.Infof("Starting maintenance worker") WorkerID: *workerID,
glog.Infof("Admin server: %s", *workerAdminServer) WorkingDir: *workerWorkingDir,
glog.Infof("Capabilities: %s", *workerCapabilities) JobTypes: *workerJobType,
Heartbeat: *workerHeartbeat,
// Parse capabilities Reconnect: *workerReconnect,
capabilities := parseCapabilities(*workerCapabilities) MaxDetect: *workerMaxDetect,
if len(capabilities) == 0 { MaxExecute: *workerMaxExecute,
glog.Fatalf("No valid capabilities specified") Address: *workerAddress,
return false MetricsPort: *workerMetricsPort,
} MetricsIP: *workerMetricsIp,
})
// Set working directory and create task-specific subdirectories
var baseWorkingDir string
if *workerWorkingDir != "" {
glog.Infof("Setting working directory to: %s", *workerWorkingDir)
if err := os.Chdir(*workerWorkingDir); err != nil {
glog.Fatalf("Failed to change working directory: %v", err)
return false
}
wd, err := os.Getwd()
if err != nil {
glog.Fatalf("Failed to get working directory: %v", err)
return false
}
baseWorkingDir = wd
glog.Infof("Current working directory: %s", baseWorkingDir)
} else {
// Use default working directory when not specified
wd, err := os.Getwd()
if err != nil {
glog.Fatalf("Failed to get current working directory: %v", err)
return false
}
baseWorkingDir = wd
glog.Infof("Using current working directory: %s", baseWorkingDir)
}
// Create task-specific subdirectories
for _, capability := range capabilities {
taskDir := filepath.Join(baseWorkingDir, string(capability))
if err := os.MkdirAll(taskDir, 0755); err != nil {
glog.Fatalf("Failed to create task directory %s: %v", taskDir, err)
return false
}
glog.Infof("Created task directory: %s", taskDir)
}
// Create gRPC dial option using TLS configuration
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
// Create worker configuration
config := &types.WorkerConfig{
AdminServer: *workerAdminServer,
Capabilities: capabilities,
MaxConcurrent: *workerMaxConcurrent,
HeartbeatInterval: *workerHeartbeatInterval,
TaskRequestInterval: *workerTaskRequestInterval,
BaseWorkingDir: baseWorkingDir,
GrpcDialOption: grpcDialOption,
}
// Create worker instance
workerInstance, err := worker.NewWorker(config)
if err != nil {
glog.Fatalf("Failed to create worker: %v", err)
return false
}
adminClient, err := worker.CreateAdminClient(*workerAdminServer, workerInstance.ID(), grpcDialOption)
if err != nil {
glog.Fatalf("Failed to create admin client: %v", err)
return false
}
// Set admin client
workerInstance.SetAdminClient(adminClient)
// Set working directory
if *workerWorkingDir != "" {
glog.Infof("Setting working directory to: %s", *workerWorkingDir)
if err := os.Chdir(*workerWorkingDir); err != nil {
glog.Fatalf("Failed to change working directory: %v", err)
return false
}
wd, err := os.Getwd()
if err != nil {
glog.Fatalf("Failed to get working directory: %v", err)
return false
}
glog.Infof("Current working directory: %s", wd)
}
// Start metrics HTTP server if port is specified
if *workerMetricsPort > 0 {
go startWorkerMetricsServer(*workerMetricsIp, *workerMetricsPort, workerInstance)
}
// Start the worker
err = workerInstance.Start()
if err != nil {
glog.Errorf("Failed to start worker: %v", err)
return false
}
// Set up signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
glog.Infof("Maintenance worker %s started successfully", workerInstance.ID())
glog.Infof("Press Ctrl+C to stop the worker")
// Wait for shutdown signal
<-sigChan
glog.Infof("Shutdown signal received, stopping worker...")
// Gracefully stop the worker
err = workerInstance.Stop()
if err != nil {
glog.Errorf("Error stopping worker: %v", err)
}
glog.Infof("Worker stopped")
return true
}
// parseCapabilities converts comma-separated capability string to task types
func parseCapabilities(capabilityStr string) []types.TaskType {
if capabilityStr == "" {
return nil
}
capabilityMap := map[string]types.TaskType{}
// Populate capabilityMap with registered task types
typesRegistry := tasks.GetGlobalTypesRegistry()
for taskType := range typesRegistry.GetAllDetectors() {
// Use the task type string directly as the key
capabilityMap[strings.ToLower(string(taskType))] = taskType
}
// Add common aliases for convenience
if taskType, exists := capabilityMap["erasure_coding"]; exists {
capabilityMap["ec"] = taskType
}
if taskType, exists := capabilityMap["remote_upload"]; exists {
capabilityMap["remote"] = taskType
}
if taskType, exists := capabilityMap["fix_replication"]; exists {
capabilityMap["replication"] = taskType
}
var capabilities []types.TaskType
parts := strings.Split(capabilityStr, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if taskType, exists := capabilityMap[part]; exists {
capabilities = append(capabilities, taskType)
} else {
glog.Warningf("Unknown capability: %s", part)
}
}
return capabilities
}
// Legacy compatibility types for backward compatibility
// These will be deprecated in future versions
// WorkerStatus represents the current status of a worker (deprecated)
type WorkerStatus struct {
WorkerID string `json:"worker_id"`
Address string `json:"address"`
Status string `json:"status"`
Capabilities []types.TaskType `json:"capabilities"`
MaxConcurrent int `json:"max_concurrent"`
CurrentLoad int `json:"current_load"`
LastHeartbeat time.Time `json:"last_heartbeat"`
CurrentTasks []types.Task `json:"current_tasks"`
Uptime time.Duration `json:"uptime"`
TasksCompleted int `json:"tasks_completed"`
TasksFailed int `json:"tasks_failed"`
}
func workerHealthHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", workerServerHeader)
w.WriteHeader(http.StatusOK)
}
func workerReadyHandler(workerInstance *worker.Worker) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", workerServerHeader)
admin := workerInstance.GetAdmin()
if admin == nil || !admin.IsConnected() {
w.WriteHeader(http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
}
}
func startWorkerMetricsServer(ip string, port int, w *worker.Worker) {
mux := http.NewServeMux()
mux.HandleFunc("/health", workerHealthHandler)
mux.HandleFunc("/ready", workerReadyHandler(w))
mux.Handle("/metrics", promhttp.HandlerFor(statsCollect.Gather, promhttp.HandlerOpts{}))
glog.V(0).Infof("Starting worker metrics server at %s", statsCollect.JoinHostPort(ip, port))
if err := http.ListenAndServe(statsCollect.JoinHostPort(ip, port), mux); err != nil {
glog.Errorf("Worker metrics server failed to start: %v", err)
}
} }

View File

@@ -0,0 +1,348 @@
package command
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/seaweedfs/seaweedfs/weed/glog"
pluginworker "github.com/seaweedfs/seaweedfs/weed/plugin/worker"
"github.com/seaweedfs/seaweedfs/weed/security"
statsCollect "github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"google.golang.org/grpc"
)
// defaultPluginWorkerJobTypes is the job type list served when -jobType is not set.
const defaultPluginWorkerJobTypes = "vacuum,volume_balance,erasure_coding"

// pluginWorkerRunOptions carries all settings for running a plugin worker,
// gathered from command-line flags (standalone mode) or mini-mode wiring.
// Zero/empty fields are replaced with defaults in runPluginWorkerWithOptions.
type pluginWorkerRunOptions struct {
	AdminServer string        // admin server address, host:port (default "localhost:23646")
	WorkerID    string        // explicit worker ID; resolved/persisted from WorkingDir when empty
	WorkingDir  string        // directory for persistent worker state (plugin.worker.id)
	JobTypes    string        // comma-separated job types to serve (default defaultPluginWorkerJobTypes)
	Heartbeat   time.Duration // heartbeat interval (default 15s)
	Reconnect   time.Duration // reconnect delay (default 5s)
	MaxDetect   int           // max concurrent detection requests (default 1)
	MaxExecute  int           // max concurrent execute requests (default 4)
	Address     string        // worker address advertised to admin
	MetricsPort int           // metrics listen port; 0 disables the metrics server
	MetricsIP   string        // metrics listen IP (default "0.0.0.0")
}
// runPluginWorkerWithOptions normalizes the supplied options, resolves the
// admin gRPC endpoint and worker identity, builds the job handlers, and runs
// the plugin worker until SIGINT/SIGTERM arrives or the worker stops on its
// own. It returns true only on a clean shutdown.
func runPluginWorkerWithOptions(options pluginWorkerRunOptions) bool {
	util.LoadConfiguration("security", false)
	// Normalize inputs and apply defaults for unset/invalid values.
	options.AdminServer = strings.TrimSpace(options.AdminServer)
	if options.AdminServer == "" {
		options.AdminServer = "localhost:23646"
	}
	options.JobTypes = strings.TrimSpace(options.JobTypes)
	if options.JobTypes == "" {
		options.JobTypes = defaultPluginWorkerJobTypes
	}
	if options.Heartbeat <= 0 {
		options.Heartbeat = 15 * time.Second
	}
	if options.Reconnect <= 0 {
		options.Reconnect = 5 * time.Second
	}
	if options.MaxDetect <= 0 {
		options.MaxDetect = 1
	}
	if options.MaxExecute <= 0 {
		options.MaxExecute = 4
	}
	options.MetricsIP = strings.TrimSpace(options.MetricsIP)
	if options.MetricsIP == "" {
		options.MetricsIP = "0.0.0.0"
	}
	// Probe the admin status endpoint; a non-default worker gRPC port is
	// encoded into the host:httpPort.grpcPort form.
	resolvedAdminServer := resolvePluginWorkerAdminServer(options.AdminServer)
	if resolvedAdminServer != options.AdminServer {
		fmt.Printf("Resolved admin worker gRPC endpoint: %s -> %s\n", options.AdminServer, resolvedAdminServer)
	}
	dialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker")
	// Explicit ID wins; otherwise a persisted/generated ID from WorkingDir.
	workerID, err := resolvePluginWorkerID(options.WorkerID, options.WorkingDir)
	if err != nil {
		glog.Errorf("Failed to resolve plugin worker ID: %v", err)
		return false
	}
	handlers, err := buildPluginWorkerHandlers(options.JobTypes, dialOption)
	if err != nil {
		glog.Errorf("Failed to build plugin worker handlers: %v", err)
		return false
	}
	worker, err := pluginworker.NewWorker(pluginworker.WorkerOptions{
		AdminServer:             resolvedAdminServer,
		WorkerID:                workerID,
		WorkerVersion:           version.Version(),
		WorkerAddress:           options.Address,
		HeartbeatInterval:       options.Heartbeat,
		ReconnectDelay:          options.Reconnect,
		MaxDetectionConcurrency: options.MaxDetect,
		MaxExecutionConcurrency: options.MaxExecute,
		GrpcDialOption:          dialOption,
		Handlers:                handlers,
	})
	if err != nil {
		glog.Errorf("Failed to create plugin worker: %v", err)
		return false
	}
	// Optionally expose /health, /ready, and /metrics.
	if options.MetricsPort > 0 {
		go startPluginWorkerMetricsServer(options.MetricsIP, options.MetricsPort, worker)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Translate SIGINT/SIGTERM into context cancellation for graceful shutdown.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigCh)
	go func() {
		sig := <-sigCh
		fmt.Printf("\nReceived signal %v, stopping plugin worker...\n", sig)
		cancel()
	}()
	fmt.Printf("Starting plugin worker (admin=%s)\n", resolvedAdminServer)
	// Run blocks until the worker exits; cancellation triggers a clean stop.
	if err := worker.Run(ctx); err != nil {
		glog.Errorf("Plugin worker stopped with error: %v", err)
		return false
	}
	fmt.Println("Plugin worker stopped")
	return true
}
// resolvePluginWorkerID determines a stable worker identity. An explicit ID
// always wins. Otherwise, when a working directory is configured, the ID is
// read from (or generated into) <workingDir>/plugin.worker.id so it survives
// restarts. Without a working directory, an empty ID is returned and
// generation is left to downstream code.
func resolvePluginWorkerID(explicitID string, workingDir string) (string, error) {
	if trimmed := strings.TrimSpace(explicitID); trimmed != "" {
		return trimmed, nil
	}
	dir := strings.TrimSpace(workingDir)
	if dir == "" {
		return "", nil
	}
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", err
	}
	idFile := filepath.Join(dir, "plugin.worker.id")
	// Reuse a previously persisted, non-blank ID when one exists.
	if data, readErr := os.ReadFile(idFile); readErr == nil {
		if stored := strings.TrimSpace(string(data)); stored != "" {
			return stored, nil
		}
	}
	// Generate a fresh ID and persist it for future runs.
	freshID := fmt.Sprintf("plugin-%d", time.Now().UnixNano())
	if err := os.WriteFile(idFile, []byte(freshID+"\n"), 0644); err != nil {
		return "", err
	}
	return freshID, nil
}
// buildPluginWorkerHandler resolves an (optionally aliased) job type to its
// canonical name and constructs the matching plugin job handler.
func buildPluginWorkerHandler(jobType string, dialOption grpc.DialOption) (pluginworker.JobHandler, error) {
	canonical, err := canonicalPluginWorkerJobType(jobType)
	if err != nil {
		return nil, err
	}
	switch canonical {
	case "vacuum":
		return pluginworker.NewVacuumHandler(dialOption), nil
	case "volume_balance":
		return pluginworker.NewVolumeBalanceHandler(dialOption), nil
	case "erasure_coding":
		return pluginworker.NewErasureCodingHandler(dialOption), nil
	}
	// canonicalPluginWorkerJobType only emits the three names above; this is a
	// defensive guard against future drift between the two functions.
	return nil, fmt.Errorf("unsupported plugin job type %q", canonical)
}
// buildPluginWorkerHandlers parses the comma-separated job type list and
// constructs one handler per canonical (deduplicated) job type.
func buildPluginWorkerHandlers(jobTypes string, dialOption grpc.DialOption) ([]pluginworker.JobHandler, error) {
	canonicalTypes, parseErr := parsePluginWorkerJobTypes(jobTypes)
	if parseErr != nil {
		return nil, parseErr
	}
	result := make([]pluginworker.JobHandler, 0, len(canonicalTypes))
	for _, canonicalType := range canonicalTypes {
		handler, err := buildPluginWorkerHandler(canonicalType, dialOption)
		if err != nil {
			return nil, err
		}
		result = append(result, handler)
	}
	return result, nil
}
// parsePluginWorkerJobTypes splits a comma-separated job type list into
// canonical, de-duplicated names, keeping first-seen order. An empty or
// separator-only input falls back to the "vacuum" default.
func parsePluginWorkerJobTypes(jobTypes string) ([]string, error) {
	trimmed := strings.TrimSpace(jobTypes)
	if trimmed == "" {
		return []string{"vacuum"}, nil
	}
	items := strings.Split(trimmed, ",")
	var (
		parsed = make([]string, 0, len(items))
		seen   = make(map[string]struct{}, len(items))
	)
	for _, item := range items {
		item = strings.TrimSpace(item)
		if item == "" {
			continue
		}
		canonical, err := canonicalPluginWorkerJobType(item)
		if err != nil {
			return nil, err
		}
		if _, duplicate := seen[canonical]; duplicate {
			continue
		}
		seen[canonical] = struct{}{}
		parsed = append(parsed, canonical)
	}
	if len(parsed) == 0 {
		// Input contained only commas/whitespace; use the default.
		return []string{"vacuum"}, nil
	}
	return parsed, nil
}
// canonicalPluginWorkerJobType normalizes a user-supplied job type name
// (case-insensitive, including common aliases such as "ec" and "balance")
// into its canonical form. The empty string defaults to "vacuum"; anything
// unrecognized is an error.
func canonicalPluginWorkerJobType(jobType string) (string, error) {
	normalized := strings.ToLower(strings.TrimSpace(jobType))
	switch normalized {
	case "", "vacuum":
		return "vacuum", nil
	case "volume_balance", "balance", "volume.balance", "volume-balance":
		return "volume_balance", nil
	case "erasure_coding", "erasure-coding", "erasure.coding", "ec":
		return "erasure_coding", nil
	}
	return "", fmt.Errorf("unsupported plugin job type %q", jobType)
}
// resolvePluginWorkerAdminServer augments the admin address with the plugin
// worker gRPC port discovered from the admin's status endpoint. The address
// is returned unchanged when it already carries an explicit gRPC port, when
// discovery fails, or when the discovered port follows the default +10000
// convention (in which case the canonical host:httpPort form suffices).
func resolvePluginWorkerAdminServer(adminServer string) string {
	adminServer = strings.TrimSpace(adminServer)
	host, httpPort, explicitGrpc, parseErr := parsePluginWorkerAdminAddress(adminServer)
	if parseErr != nil || explicitGrpc {
		return adminServer
	}
	grpcPort, fetchErr := fetchPluginWorkerGrpcPort(host, httpPort)
	if fetchErr != nil || grpcPort <= 0 {
		return adminServer
	}
	if grpcPort == httpPort+10000 {
		// Default offset applies; keep the short form.
		return adminServer
	}
	return fmt.Sprintf("%s:%d.%d", host, httpPort, grpcPort)
}
// parsePluginWorkerAdminAddress splits an admin address of the form
// "host:httpPort" or "host:httpPort.grpcPort" into its parts, reporting
// whether an explicit ".grpcPort" suffix was present. A suffix after the dot
// that is not numeric is treated as part of the HTTP port text (and then
// fails port parsing).
func parsePluginWorkerAdminAddress(adminServer string) (host string, httpPort int, hasExplicitGrpcPort bool, err error) {
	adminServer = strings.TrimSpace(adminServer)
	sep := strings.LastIndex(adminServer, ":")
	if sep <= 0 || sep >= len(adminServer)-1 {
		return "", 0, false, fmt.Errorf("invalid admin address %q", adminServer)
	}
	host, portText := adminServer[:sep], adminServer[sep+1:]
	// A trailing ".NNN" after the HTTP port denotes an explicit gRPC port.
	if dot := strings.LastIndex(portText, "."); dot > 0 && dot < len(portText)-1 {
		if _, grpcErr := strconv.Atoi(portText[dot+1:]); grpcErr == nil {
			hasExplicitGrpcPort = true
			portText = portText[:dot]
		}
	}
	httpPort, err = strconv.Atoi(portText)
	if err != nil || httpPort <= 0 {
		return "", 0, false, fmt.Errorf("invalid admin http port in %q", adminServer)
	}
	return host, httpPort, hasExplicitGrpcPort, nil
}
// fetchPluginWorkerGrpcPort queries the admin plugin status endpoint, trying
// plain http first and then https, and returns the advertised worker gRPC
// port. The last failure is reported if neither scheme yields a usable port.
func fetchPluginWorkerGrpcPort(host string, httpPort int) (int, error) {
	httpClient := &http.Client{Timeout: 2 * time.Second}
	hostPort := util.JoinHostPort(host, httpPort)
	var lastErr error
	for _, scheme := range []string{"http", "https"} {
		statusURL := fmt.Sprintf("%s://%s/api/plugin/status", scheme, hostPort)
		resp, getErr := httpClient.Get(statusURL)
		if getErr != nil {
			lastErr = getErr
			continue
		}
		var payload struct {
			WorkerGrpcPort int `json:"worker_grpc_port"`
		}
		// Decode before closing; the body must be closed on every path so the
		// transport can reuse the connection.
		decodeErr := json.NewDecoder(resp.Body).Decode(&payload)
		resp.Body.Close()
		switch {
		case resp.StatusCode != http.StatusOK:
			// Non-200 takes priority over any decode failure of an error page.
			lastErr = fmt.Errorf("status code %d from %s", resp.StatusCode, statusURL)
		case decodeErr != nil:
			lastErr = fmt.Errorf("decode plugin status from %s: %w", statusURL, decodeErr)
		case payload.WorkerGrpcPort <= 0:
			lastErr = fmt.Errorf("plugin status from %s returned empty worker_grpc_port", statusURL)
		default:
			return payload.WorkerGrpcPort, nil
		}
	}
	if lastErr == nil {
		lastErr = fmt.Errorf("plugin status endpoint unavailable")
	}
	return 0, lastErr
}
func pluginWorkerHealthHandler(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
}
// pluginWorkerReadyHandler returns the readiness probe: 200 OK only once the
// worker holds an active admin connection, 503 otherwise (including a nil
// worker).
func pluginWorkerReadyHandler(pluginRuntime *pluginworker.Worker) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		if pluginRuntime != nil && pluginRuntime.IsConnected() {
			w.WriteHeader(http.StatusOK)
			return
		}
		w.WriteHeader(http.StatusServiceUnavailable)
	}
}
// startPluginWorkerMetricsServer serves /health, /ready, and Prometheus
// /metrics for the plugin worker. It blocks in ListenAndServe until the
// listener fails, so callers should run it on its own goroutine; listener
// errors are logged rather than returned.
func startPluginWorkerMetricsServer(ip string, port int, pluginRuntime *pluginworker.Worker) {
	mux := http.NewServeMux()
	mux.HandleFunc("/health", pluginWorkerHealthHandler)
	mux.HandleFunc("/ready", pluginWorkerReadyHandler(pluginRuntime))
	mux.Handle("/metrics", promhttp.HandlerFor(statsCollect.Gather, promhttp.HandlerOpts{}))
	// Compute the listen address once instead of once per use.
	listenAddr := statsCollect.JoinHostPort(ip, port)
	glog.V(0).Infof("Starting plugin worker metrics server at %s", listenAddr)
	if err := http.ListenAndServe(listenAddr, mux); err != nil {
		glog.Errorf("Plugin worker metrics server failed to start: %v", err)
	}
}

View File

@@ -0,0 +1,13 @@
package command
import "testing"
// TestWorkerDefaultJobTypes verifies that the default -jobType flag value
// expands into all three built-in plugin handlers.
func TestWorkerDefaultJobTypes(t *testing.T) {
	jobTypes, err := parsePluginWorkerJobTypes(*workerJobType)
	if err != nil {
		t.Fatalf("parsePluginWorkerJobTypes(default worker flag) err = %v", err)
	}
	if count := len(jobTypes); count != 3 {
		t.Fatalf("expected default worker job types to include 3 handlers, got %v", jobTypes)
	}
}

View File

@@ -14,6 +14,8 @@ gen:
protoc mq_schema.proto --go_out=./schema_pb --go-grpc_out=./schema_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative protoc mq_schema.proto --go_out=./schema_pb --go-grpc_out=./schema_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
protoc mq_agent.proto --go_out=./mq_agent_pb --go-grpc_out=./mq_agent_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative protoc mq_agent.proto --go_out=./mq_agent_pb --go-grpc_out=./mq_agent_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
protoc worker.proto --go_out=./worker_pb --go-grpc_out=./worker_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative protoc worker.proto --go_out=./worker_pb --go-grpc_out=./worker_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
mkdir -p ./plugin_pb
protoc plugin.proto --go_out=./plugin_pb --go-grpc_out=./plugin_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
# protoc filer.proto --java_out=../../other/java/client/src/main/java # protoc filer.proto --java_out=../../other/java/client/src/main/java
cp filer.proto ../../other/java/client/src/main/proto cp filer.proto ../../other/java/client/src/main/proto

443
weed/pb/plugin.proto Normal file
View File

@@ -0,0 +1,443 @@
syntax = "proto3";

package plugin;

option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb";

import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";

// PluginControlService is the admin-facing stream API for external workers.
// Workers initiate and keep this stream alive; all control plane traffic flows through it.
service PluginControlService {
  rpc WorkerStream(stream WorkerToAdminMessage) returns (stream AdminToWorkerMessage);
}

// WorkerToAdminMessage carries worker-originated events and responses.
message WorkerToAdminMessage {
  string worker_id = 1;
  google.protobuf.Timestamp sent_at = 2;
  // Exactly one payload per envelope.
  oneof body {
    WorkerHello hello = 10;
    WorkerHeartbeat heartbeat = 11;
    WorkerAcknowledge acknowledge = 12;
    ConfigSchemaResponse config_schema_response = 13;
    DetectionProposals detection_proposals = 14;
    DetectionComplete detection_complete = 15;
    JobProgressUpdate job_progress_update = 16;
    JobCompleted job_completed = 17;
  }
}

// AdminToWorkerMessage carries commands and lifecycle notifications from admin.
message AdminToWorkerMessage {
  string request_id = 1;
  google.protobuf.Timestamp sent_at = 2;
  // Exactly one payload per envelope.
  oneof body {
    AdminHello hello = 10;
    RequestConfigSchema request_config_schema = 11;
    RunDetectionRequest run_detection_request = 12;
    ExecuteJobRequest execute_job_request = 13;
    CancelRequest cancel_request = 14;
    AdminShutdown shutdown = 15;
  }
}
// WorkerHello is the first worker message on a new stream, announcing
// identity, version, and per-job-type capabilities.
message WorkerHello {
  string worker_id = 1;
  string worker_instance_id = 2;
  string address = 3;
  string worker_version = 4;
  string protocol_version = 5;
  repeated JobTypeCapability capabilities = 6;
  map<string, string> metadata = 7;
}

// AdminHello is the admin's reply to WorkerHello, accepting or rejecting the
// worker and communicating stream-keeping parameters.
message AdminHello {
  bool accepted = 1;
  string message = 2;
  int32 heartbeat_interval_seconds = 3;
  int32 reconnect_delay_seconds = 4;
}

// WorkerHeartbeat reports current load: in-flight work plus slot usage for
// detection and execution.
message WorkerHeartbeat {
  string worker_id = 1;
  repeated RunningWork running_work = 2;
  int32 detection_slots_used = 3;
  int32 detection_slots_total = 4;
  int32 execution_slots_used = 5;
  int32 execution_slots_total = 6;
  map<string, int32> queued_jobs_by_type = 7;
  map<string, string> metadata = 8;
}

// WorkerAcknowledge confirms (or rejects) receipt of an admin command
// identified by request_id.
message WorkerAcknowledge {
  string request_id = 1;
  bool accepted = 2;
  string message = 3;
}

// RunningWork describes one in-flight detection or execution unit.
message RunningWork {
  string work_id = 1;
  WorkKind kind = 2;
  string job_type = 3;
  JobState state = 4;
  double progress_percent = 5;
  string stage = 6;
}

// JobTypeCapability declares what a worker can do for one job type and its
// concurrency ceilings.
message JobTypeCapability {
  string job_type = 1;
  bool can_detect = 2;
  bool can_execute = 3;
  int32 max_detection_concurrency = 4;
  int32 max_execution_concurrency = 5;
  string display_name = 6;
  string description = 7;
}

// RequestConfigSchema asks a worker for the descriptor of one job type.
message RequestConfigSchema {
  string job_type = 1;
  bool force_refresh = 2;
}

// ConfigSchemaResponse returns the requested job type descriptor or an error.
message ConfigSchemaResponse {
  string request_id = 1;
  string job_type = 2;
  bool success = 3;
  string error_message = 4;
  JobTypeDescriptor job_type_descriptor = 5;
}
// JobTypeDescriptor defines one job type contract, including UI schema and defaults.
message JobTypeDescriptor {
  string job_type = 1;
  string display_name = 2;
  string description = 3;
  string icon = 4;
  // Bumped when the descriptor's schema changes; persisted configs record it.
  uint32 descriptor_version = 5;
  // Admin-owned options such as detection frequency and dispatch concurrency.
  ConfigForm admin_config_form = 6;
  // Worker-owned options used during detection and execution.
  ConfigForm worker_config_form = 7;
  AdminRuntimeDefaults admin_runtime_defaults = 8;
  map<string, ConfigValue> worker_default_values = 9;
}

// ConfigForm is a renderable configuration form: sections of fields plus
// their default values.
message ConfigForm {
  string form_id = 1;
  string title = 2;
  string description = 3;
  repeated ConfigSection sections = 4;
  map<string, ConfigValue> default_values = 5;
}

// ConfigSection groups related fields under one heading.
message ConfigSection {
  string section_id = 1;
  string title = 2;
  string description = 3;
  repeated ConfigField fields = 4;
}

// ConfigField describes one input: its type, widget, constraints, and
// optional conditional visibility.
message ConfigField {
  string name = 1;
  string label = 2;
  string description = 3;
  string help_text = 4;
  string placeholder = 5;
  ConfigFieldType field_type = 6;
  ConfigWidget widget = 7;
  bool required = 8;
  bool read_only = 9;
  bool sensitive = 10;
  ConfigValue min_value = 11;
  ConfigValue max_value = 12;
  repeated ConfigOption options = 13;
  repeated ValidationRule validation_rules = 14;
  // Simple visibility dependency: show this field when the referenced field equals value.
  string visible_when_field = 15;
  ConfigValue visible_when_equals = 16;
}

// ConfigOption is one selectable choice for enum/select fields.
message ConfigOption {
  string value = 1;
  string label = 2;
  string description = 3;
  bool disabled = 4;
}

// ValidationRule attaches a typed validation expression to a field.
message ValidationRule {
  ValidationRuleType type = 1;
  string expression = 2;
  string error_message = 3;
}

// ConfigValue is a tagged union covering all supported config value shapes,
// including homogeneous lists and nested maps.
message ConfigValue {
  oneof kind {
    bool bool_value = 1;
    int64 int64_value = 2;
    double double_value = 3;
    string string_value = 4;
    bytes bytes_value = 5;
    google.protobuf.Duration duration_value = 6;
    StringList string_list = 7;
    Int64List int64_list = 8;
    DoubleList double_list = 9;
    BoolList bool_list = 10;
    ValueList list_value = 11;
    ValueMap map_value = 12;
  }
}

// Wrapper lists exist because proto3 forbids repeated fields inside a oneof.
message StringList {
  repeated string values = 1;
}
message Int64List {
  repeated int64 values = 1;
}
message DoubleList {
  repeated double values = 1;
}
message BoolList {
  repeated bool values = 1;
}
message ValueList {
  repeated ConfigValue values = 1;
}
message ValueMap {
  map<string, ConfigValue> fields = 1;
}
// AdminRuntimeDefaults seeds the admin scheduler policy for a job type when
// no persisted config exists yet. Field layout mirrors AdminRuntimeConfig.
message AdminRuntimeDefaults {
  bool enabled = 1;
  int32 detection_interval_seconds = 2;
  int32 detection_timeout_seconds = 3;
  int32 max_jobs_per_detection = 4;
  int32 global_execution_concurrency = 5;
  int32 per_worker_execution_concurrency = 6;
  int32 retry_limit = 7;
  int32 retry_backoff_seconds = 8;
}

// AdminRuntimeConfig is the effective (possibly admin-edited) scheduler
// policy sent along with detection and execution requests.
message AdminRuntimeConfig {
  bool enabled = 1;
  int32 detection_interval_seconds = 2;
  int32 detection_timeout_seconds = 3;
  int32 max_jobs_per_detection = 4;
  int32 global_execution_concurrency = 5;
  int32 per_worker_execution_concurrency = 6;
  int32 retry_limit = 7;
  int32 retry_backoff_seconds = 8;
}

// RunDetectionRequest asks a worker to run detection for one job type with
// the given config values and cluster context.
message RunDetectionRequest {
  string request_id = 1;
  string job_type = 2;
  int64 detection_sequence = 3;
  AdminRuntimeConfig admin_runtime = 4;
  map<string, ConfigValue> admin_config_values = 5;
  map<string, ConfigValue> worker_config_values = 6;
  ClusterContext cluster_context = 7;
  google.protobuf.Timestamp last_successful_run = 8;
  // Cap on proposals returned; see DetectionProposals.has_more.
  int32 max_results = 9;
}

// DetectionProposals streams a batch of proposed jobs back to the admin.
message DetectionProposals {
  string request_id = 1;
  string job_type = 2;
  repeated JobProposal proposals = 3;
  // True when results were truncated to the requested max_results.
  bool has_more = 4;
}

// DetectionComplete terminates a detection run, successfully or not.
message DetectionComplete {
  string request_id = 1;
  string job_type = 2;
  bool success = 3;
  string error_message = 4;
  int32 total_proposals = 5;
}

// JobProposal is one detected candidate job; dedupe_key lets the admin drop
// duplicates across detection runs.
message JobProposal {
  string proposal_id = 1;
  string dedupe_key = 2;
  string job_type = 3;
  JobPriority priority = 4;
  string summary = 5;
  string detail = 6;
  map<string, ConfigValue> parameters = 7;
  map<string, string> labels = 8;
  google.protobuf.Timestamp not_before = 9;
  google.protobuf.Timestamp expires_at = 10;
}

// ExecuteJobRequest dispatches one accepted job to a worker for execution.
message ExecuteJobRequest {
  string request_id = 1;
  JobSpec job = 2;
  AdminRuntimeConfig admin_runtime = 3;
  map<string, ConfigValue> admin_config_values = 4;
  map<string, ConfigValue> worker_config_values = 5;
  ClusterContext cluster_context = 6;
  // 1-based attempt counter for retries.
  int32 attempt = 7;
}

// JobSpec is the admin-assigned job derived from an accepted proposal.
message JobSpec {
  string job_id = 1;
  string job_type = 2;
  string dedupe_key = 3;
  JobPriority priority = 4;
  string summary = 5;
  string detail = 6;
  map<string, ConfigValue> parameters = 7;
  map<string, string> labels = 8;
  google.protobuf.Timestamp created_at = 9;
  google.protobuf.Timestamp scheduled_at = 10;
}
// JobProgressUpdate reports incremental execution progress for one job.
message JobProgressUpdate {
  string request_id = 1;
  string job_id = 2;
  string job_type = 3;
  JobState state = 4;
  double progress_percent = 5;
  string stage = 6;
  string message = 7;
  map<string, ConfigValue> metrics = 8;
  repeated ActivityEvent activities = 9;
  google.protobuf.Timestamp updated_at = 10;
}

// JobCompleted is the terminal report for one job execution.
message JobCompleted {
  string request_id = 1;
  string job_id = 2;
  string job_type = 3;
  bool success = 4;
  string error_message = 5;
  JobResult result = 6;
  repeated ActivityEvent activities = 7;
  google.protobuf.Timestamp completed_at = 8;
}

// JobResult carries structured execution output.
message JobResult {
  map<string, ConfigValue> output_values = 1;
  string summary = 2;
}

// ClusterContext lists the cluster endpoints a worker may contact while
// detecting or executing.
message ClusterContext {
  repeated string master_grpc_addresses = 1;
  repeated string filer_grpc_addresses = 2;
  repeated string volume_grpc_addresses = 3;
  map<string, string> metadata = 4;
}

// ActivityEvent is one human-readable entry in a job's activity trail.
message ActivityEvent {
  ActivitySource source = 1;
  string message = 2;
  string stage = 3;
  map<string, ConfigValue> details = 4;
  google.protobuf.Timestamp created_at = 5;
}

// CancelRequest asks a worker to abort an in-flight detection or execution.
message CancelRequest {
  string target_id = 1;
  WorkKind target_kind = 2;
  string reason = 3;
  bool force = 4;
}

// AdminShutdown notifies workers that the admin is going away and how long
// they have to wind down.
message AdminShutdown {
  string reason = 1;
  int32 grace_period_seconds = 2;
}

// PersistedJobTypeConfig is the admin-side on-disk model per job type.
message PersistedJobTypeConfig {
  string job_type = 1;
  uint32 descriptor_version = 2;
  map<string, ConfigValue> admin_config_values = 3;
  map<string, ConfigValue> worker_config_values = 4;
  AdminRuntimeConfig admin_runtime = 5;
  google.protobuf.Timestamp updated_at = 6;
  string updated_by = 7;
}
// WorkKind distinguishes detection work from execution work.
enum WorkKind {
  WORK_KIND_UNSPECIFIED = 0;
  WORK_KIND_DETECTION = 1;
  WORK_KIND_EXECUTION = 2;
}

// JobPriority orders dispatch of competing jobs.
enum JobPriority {
  JOB_PRIORITY_UNSPECIFIED = 0;
  JOB_PRIORITY_LOW = 1;
  JOB_PRIORITY_NORMAL = 2;
  JOB_PRIORITY_HIGH = 3;
  JOB_PRIORITY_CRITICAL = 4;
}

// JobState is the lifecycle of one job from pending through a terminal state.
enum JobState {
  JOB_STATE_UNSPECIFIED = 0;
  JOB_STATE_PENDING = 1;
  JOB_STATE_ASSIGNED = 2;
  JOB_STATE_RUNNING = 3;
  JOB_STATE_SUCCEEDED = 4;
  JOB_STATE_FAILED = 5;
  JOB_STATE_CANCELED = 6;
}

// ConfigFieldType is the logical data type of a config field value.
enum ConfigFieldType {
  CONFIG_FIELD_TYPE_UNSPECIFIED = 0;
  CONFIG_FIELD_TYPE_BOOL = 1;
  CONFIG_FIELD_TYPE_INT64 = 2;
  CONFIG_FIELD_TYPE_DOUBLE = 3;
  CONFIG_FIELD_TYPE_STRING = 4;
  CONFIG_FIELD_TYPE_BYTES = 5;
  CONFIG_FIELD_TYPE_DURATION = 6;
  CONFIG_FIELD_TYPE_ENUM = 7;
  CONFIG_FIELD_TYPE_LIST = 8;
  CONFIG_FIELD_TYPE_OBJECT = 9;
}

// ConfigWidget is the UI control used to render a config field.
enum ConfigWidget {
  CONFIG_WIDGET_UNSPECIFIED = 0;
  CONFIG_WIDGET_TOGGLE = 1;
  CONFIG_WIDGET_TEXT = 2;
  CONFIG_WIDGET_TEXTAREA = 3;
  CONFIG_WIDGET_NUMBER = 4;
  CONFIG_WIDGET_SELECT = 5;
  CONFIG_WIDGET_MULTI_SELECT = 6;
  CONFIG_WIDGET_DURATION = 7;
  CONFIG_WIDGET_PASSWORD = 8;
}

// ValidationRuleType selects how ValidationRule.expression is interpreted.
enum ValidationRuleType {
  VALIDATION_RULE_TYPE_UNSPECIFIED = 0;
  VALIDATION_RULE_TYPE_REGEX = 1;
  VALIDATION_RULE_TYPE_MIN_LENGTH = 2;
  VALIDATION_RULE_TYPE_MAX_LENGTH = 3;
  VALIDATION_RULE_TYPE_MIN_ITEMS = 4;
  VALIDATION_RULE_TYPE_MAX_ITEMS = 5;
  VALIDATION_RULE_TYPE_CUSTOM = 6;
}

// ActivitySource attributes an ActivityEvent to its origin.
enum ActivitySource {
  ACTIVITY_SOURCE_UNSPECIFIED = 0;
  ACTIVITY_SOURCE_ADMIN = 1;
  ACTIVITY_SOURCE_DETECTOR = 2;
  ACTIVITY_SOURCE_EXECUTOR = 3;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,121 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v6.33.4
// source: plugin.proto

package plugin_pb

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9

const (
	PluginControlService_WorkerStream_FullMethodName = "/plugin.PluginControlService/WorkerStream"
)

// PluginControlServiceClient is the client API for PluginControlService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// PluginControlService is the admin-facing stream API for external workers.
// Workers initiate and keep this stream alive; all control plane traffic flows through it.
type PluginControlServiceClient interface {
	WorkerStream(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[WorkerToAdminMessage, AdminToWorkerMessage], error)
}

type pluginControlServiceClient struct {
	cc grpc.ClientConnInterface
}

func NewPluginControlServiceClient(cc grpc.ClientConnInterface) PluginControlServiceClient {
	return &pluginControlServiceClient{cc}
}

func (c *pluginControlServiceClient) WorkerStream(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[WorkerToAdminMessage, AdminToWorkerMessage], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &PluginControlService_ServiceDesc.Streams[0], PluginControlService_WorkerStream_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &grpc.GenericClientStream[WorkerToAdminMessage, AdminToWorkerMessage]{ClientStream: stream}
	return x, nil
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type PluginControlService_WorkerStreamClient = grpc.BidiStreamingClient[WorkerToAdminMessage, AdminToWorkerMessage]

// PluginControlServiceServer is the server API for PluginControlService service.
// All implementations must embed UnimplementedPluginControlServiceServer
// for forward compatibility.
//
// PluginControlService is the admin-facing stream API for external workers.
// Workers initiate and keep this stream alive; all control plane traffic flows through it.
type PluginControlServiceServer interface {
	WorkerStream(grpc.BidiStreamingServer[WorkerToAdminMessage, AdminToWorkerMessage]) error
	mustEmbedUnimplementedPluginControlServiceServer()
}

// UnimplementedPluginControlServiceServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedPluginControlServiceServer struct{}

func (UnimplementedPluginControlServiceServer) WorkerStream(grpc.BidiStreamingServer[WorkerToAdminMessage, AdminToWorkerMessage]) error {
	return status.Errorf(codes.Unimplemented, "method WorkerStream not implemented")
}
func (UnimplementedPluginControlServiceServer) mustEmbedUnimplementedPluginControlServiceServer() {}
func (UnimplementedPluginControlServiceServer) testEmbeddedByValue()                              {}

// UnsafePluginControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to PluginControlServiceServer will
// result in compilation errors.
type UnsafePluginControlServiceServer interface {
	mustEmbedUnimplementedPluginControlServiceServer()
}

func RegisterPluginControlServiceServer(s grpc.ServiceRegistrar, srv PluginControlServiceServer) {
	// If the following call panics, it indicates UnimplementedPluginControlServiceServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&PluginControlService_ServiceDesc, srv)
}

func _PluginControlService_WorkerStream_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(PluginControlServiceServer).WorkerStream(&grpc.GenericServerStream[WorkerToAdminMessage, AdminToWorkerMessage]{ServerStream: stream})
}

// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type PluginControlService_WorkerStreamServer = grpc.BidiStreamingServer[WorkerToAdminMessage, AdminToWorkerMessage]

// PluginControlService_ServiceDesc is the grpc.ServiceDesc for PluginControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var PluginControlService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "plugin.PluginControlService",
	HandlerType: (*PluginControlServiceServer)(nil),
	Methods:     []grpc.MethodDesc{},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "WorkerStream",
			Handler:       _PluginControlService_WorkerStream_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "plugin.proto",
}

View File

@@ -0,0 +1,899 @@
package pluginworker
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/topology"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
ecstorage "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
erasurecodingtask "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
)
// erasureCodingWorkerConfig bundles the worker-side erasure coding settings
// derived from per-job-type config values.
type erasureCodingWorkerConfig struct {
	// TaskConfig holds the detection thresholds consumed by the erasure
	// coding task's Detection function.
	TaskConfig *erasurecodingtask.Config
	// MinIntervalSeconds throttles detection runs relative to the last
	// successful run (see shouldSkipDetectionByInterval).
	MinIntervalSeconds int
}
// ErasureCodingHandler is the plugin job handler for erasure coding.
type ErasureCodingHandler struct {
	// grpcDialOption configures gRPC dialing; presumably used when contacting
	// master/volume servers during detection and execution — confirm against
	// collectVolumeMetrics and the execute path.
	grpcDialOption grpc.DialOption
}

// NewErasureCodingHandler builds an erasure coding handler that uses the
// given gRPC dial option for cluster connections.
func NewErasureCodingHandler(grpcDialOption grpc.DialOption) *ErasureCodingHandler {
	return &ErasureCodingHandler{grpcDialOption: grpcDialOption}
}
// Capability advertises this handler's contract to the admin: it can both
// detect and execute erasure_coding jobs, each limited to one concurrent
// unit of work.
func (h *ErasureCodingHandler) Capability() *plugin_pb.JobTypeCapability {
	return &plugin_pb.JobTypeCapability{
		JobType:                 "erasure_coding",
		CanDetect:               true,
		CanExecute:              true,
		MaxDetectionConcurrency: 1,
		MaxExecutionConcurrency: 1,
		DisplayName:             "Erasure Coding",
		Description:             "Converts full and quiet volumes into EC shards",
	}
}
func (h *ErasureCodingHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
return &plugin_pb.JobTypeDescriptor{
JobType: "erasure_coding",
DisplayName: "Erasure Coding",
Description: "Detect and execute erasure coding for suitable volumes",
Icon: "fas fa-shield-alt",
DescriptorVersion: 1,
AdminConfigForm: &plugin_pb.ConfigForm{
FormId: "erasure-coding-admin",
Title: "Erasure Coding Admin Config",
Description: "Admin-side controls for erasure coding detection scope.",
Sections: []*plugin_pb.ConfigSection{
{
SectionId: "scope",
Title: "Scope",
Description: "Optional filters applied before erasure coding detection.",
Fields: []*plugin_pb.ConfigField{
{
Name: "collection_filter",
Label: "Collection Filter",
Description: "Only detect erasure coding opportunities in this collection when set.",
Placeholder: "all collections",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_STRING,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_TEXT,
},
},
},
},
DefaultValues: map[string]*plugin_pb.ConfigValue{
"collection_filter": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: ""},
},
},
},
WorkerConfigForm: &plugin_pb.ConfigForm{
FormId: "erasure-coding-worker",
Title: "Erasure Coding Worker Config",
Description: "Worker-side detection thresholds.",
Sections: []*plugin_pb.ConfigSection{
{
SectionId: "thresholds",
Title: "Detection Thresholds",
Description: "Controls for when erasure coding jobs should be proposed.",
Fields: []*plugin_pb.ConfigField{
{
Name: "quiet_for_seconds",
Label: "Quiet Period (s)",
Description: "Volume must remain unmodified for at least this duration.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0}},
},
{
Name: "fullness_ratio",
Label: "Fullness Ratio",
Description: "Minimum volume fullness ratio to trigger erasure coding.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_DOUBLE,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0}},
MaxValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 1}},
},
{
Name: "min_size_mb",
Label: "Minimum Volume Size (MB)",
Description: "Only volumes larger than this size are considered.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 1}},
},
{
Name: "min_interval_seconds",
Label: "Minimum Detection Interval (s)",
Description: "Skip detection if the last successful run is more recent than this interval.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0}},
},
},
},
},
DefaultValues: map[string]*plugin_pb.ConfigValue{
"quiet_for_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 300},
},
"fullness_ratio": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.8},
},
"min_size_mb": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 30},
},
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 60},
},
},
},
AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
Enabled: true,
DetectionIntervalSeconds: 60 * 5,
DetectionTimeoutSeconds: 300,
MaxJobsPerDetection: 500,
GlobalExecutionConcurrency: 16,
PerWorkerExecutionConcurrency: 4,
RetryLimit: 1,
RetryBackoffSeconds: 30,
},
WorkerDefaultValues: map[string]*plugin_pb.ConfigValue{
"quiet_for_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 300},
},
"fullness_ratio": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.8},
},
"min_size_mb": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 30},
},
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 60},
},
},
}
}
// Detect runs erasure coding detection for an admin-issued request and
// streams proposals plus a completion marker through sender.
//
// Flow: validate the request, honor the worker-side min-interval throttle,
// apply the admin collection filter, collect volume metrics from the cluster
// masters, run the task-level detection, and convert results to proposals
// (truncated to request.MaxResults when set).
func (h *ErasureCodingHandler) Detect(
	ctx context.Context,
	request *plugin_pb.RunDetectionRequest,
	sender DetectionSender,
) error {
	if request == nil {
		return fmt.Errorf("run detection request is nil")
	}
	if sender == nil {
		return fmt.Errorf("detection sender is nil")
	}
	// An empty job type is tolerated; anything else must match this handler.
	if request.JobType != "" && request.JobType != "erasure_coding" {
		return fmt.Errorf("job type %q is not handled by erasure_coding worker", request.JobType)
	}
	workerConfig := deriveErasureCodingWorkerConfig(request.GetWorkerConfigValues())
	if shouldSkipDetectionByInterval(request.GetLastSuccessfulRun(), workerConfig.MinIntervalSeconds) {
		// Too soon since the last successful run: report an empty, successful
		// detection so the admin records the attempt without scheduling work.
		minInterval := time.Duration(workerConfig.MinIntervalSeconds) * time.Second
		// Best-effort activity trace; failure to send it is not fatal.
		_ = sender.SendActivity(buildDetectorActivity(
			"skipped_by_interval",
			fmt.Sprintf("ERASURE CODING: Detection skipped due to min interval (%s)", minInterval),
			map[string]*plugin_pb.ConfigValue{
				"min_interval_seconds": {
					Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(workerConfig.MinIntervalSeconds)},
				},
			},
		))
		if err := sender.SendProposals(&plugin_pb.DetectionProposals{
			JobType:   "erasure_coding",
			Proposals: []*plugin_pb.JobProposal{},
			HasMore:   false,
		}); err != nil {
			return err
		}
		return sender.SendComplete(&plugin_pb.DetectionComplete{
			JobType:        "erasure_coding",
			Success:        true,
			TotalProposals: 0,
		})
	}
	// Admin-owned scope filter; empty means all collections.
	collectionFilter := strings.TrimSpace(readStringConfig(request.GetAdminConfigValues(), "collection_filter", ""))
	if collectionFilter != "" {
		// NOTE(review): assumes deriveErasureCodingWorkerConfig always returns
		// a non-nil TaskConfig — confirm.
		workerConfig.TaskConfig.CollectionFilter = collectionFilter
	}
	masters := make([]string, 0)
	if request.ClusterContext != nil {
		masters = append(masters, request.ClusterContext.MasterGrpcAddresses...)
	}
	metrics, activeTopology, err := h.collectVolumeMetrics(ctx, masters, collectionFilter)
	if err != nil {
		return err
	}
	clusterInfo := &workertypes.ClusterInfo{ActiveTopology: activeTopology}
	results, err := erasurecodingtask.Detection(metrics, clusterInfo, workerConfig.TaskConfig)
	if err != nil {
		return err
	}
	// Emit a human-readable explanation of skip reasons; never fail the
	// detection run because the trace could not be sent.
	if traceErr := emitErasureCodingDetectionDecisionTrace(sender, metrics, workerConfig.TaskConfig, results); traceErr != nil {
		glog.Warningf("Plugin worker failed to emit erasure_coding detection trace: %v", traceErr)
	}
	// Truncate to the admin-requested cap, flagging that more results exist.
	maxResults := int(request.MaxResults)
	hasMore := false
	if maxResults > 0 && len(results) > maxResults {
		hasMore = true
		results = results[:maxResults]
	}
	proposals := make([]*plugin_pb.JobProposal, 0, len(results))
	for _, result := range results {
		proposal, proposalErr := buildErasureCodingProposal(result)
		if proposalErr != nil {
			// Skip malformed results rather than aborting the whole batch.
			glog.Warningf("Plugin worker skip invalid erasure_coding proposal: %v", proposalErr)
			continue
		}
		proposals = append(proposals, proposal)
	}
	if err := sender.SendProposals(&plugin_pb.DetectionProposals{
		JobType:   "erasure_coding",
		Proposals: proposals,
		HasMore:   hasMore,
	}); err != nil {
		return err
	}
	return sender.SendComplete(&plugin_pb.DetectionComplete{
		JobType:        "erasure_coding",
		Success:        true,
		TotalProposals: int32(len(proposals)),
	})
}
// emitErasureCodingDetectionDecisionTrace emits best-effort detector activity
// events explaining the EC detection decision: one aggregate "decision_summary"
// event with counters and thresholds, followed by up to three per-volume
// "decision_volume" detail events. A nil sender or taskConfig makes this a
// no-op; any send error aborts the trace and is returned to the caller.
func emitErasureCodingDetectionDecisionTrace(
sender DetectionSender,
metrics []*workertypes.VolumeHealthMetrics,
taskConfig *erasurecodingtask.Config,
results []*workertypes.TaskDetectionResult,
) error {
if sender == nil || taskConfig == nil {
return nil
}
quietThreshold := time.Duration(taskConfig.QuietForSeconds) * time.Second
minSizeBytes := uint64(taskConfig.MinSizeMB) * 1024 * 1024
// Parse the comma-separated collection filter into a membership set.
allowedCollections := make(map[string]bool)
if strings.TrimSpace(taskConfig.CollectionFilter) != "" {
for _, collection := range strings.Split(taskConfig.CollectionFilter, ",") {
trimmed := strings.TrimSpace(collection)
if trimmed != "" {
allowedCollections[trimmed] = true
}
}
}
// Group replica metrics by volume ID so each volume is evaluated once.
volumeGroups := make(map[uint32][]*workertypes.VolumeHealthMetrics)
for _, metric := range metrics {
if metric == nil {
continue
}
volumeGroups[metric.VolumeID] = append(volumeGroups[metric.VolumeID], metric)
}
// Per-criterion skip counters for the summary message below.
skippedAlreadyEC := 0
skippedTooSmall := 0
skippedCollectionFilter := 0
skippedQuietTime := 0
skippedFullness := 0
for _, groupMetrics := range volumeGroups {
if len(groupMetrics) == 0 {
continue
}
// Pick a deterministic representative replica: the one with the
// lexicographically smallest server address.
metric := groupMetrics[0]
for _, candidate := range groupMetrics {
if candidate != nil && candidate.Server < metric.Server {
metric = candidate
}
}
if metric == nil {
continue
}
if metric.IsECVolume {
skippedAlreadyEC++
continue
}
if metric.Size < minSizeBytes {
skippedTooSmall++
continue
}
if len(allowedCollections) > 0 && !allowedCollections[metric.Collection] {
skippedCollectionFilter++
continue
}
// NOTE(review): the two checks below do not `continue`, so a single
// volume can be counted as both not-quiet and not-full — presumably
// intentional so the summary reflects every unmet criterion; confirm.
if metric.Age < quietThreshold {
skippedQuietTime++
}
if metric.FullnessRatio < taskConfig.FullnessRatio {
skippedFullness++
}
}
// totalVolumes counts raw metric entries (replicas), not unique volumes.
totalVolumes := len(metrics)
summaryMessage := ""
if len(results) == 0 {
summaryMessage = fmt.Sprintf(
"EC detection: No tasks created for %d volumes (skipped: %d already EC, %d too small, %d filtered, %d not quiet, %d not full)",
totalVolumes,
skippedAlreadyEC,
skippedTooSmall,
skippedCollectionFilter,
skippedQuietTime,
skippedFullness,
)
} else {
summaryMessage = fmt.Sprintf(
"EC detection: Created %d task(s) from %d volumes (skipped: %d already EC, %d too small, %d filtered, %d not quiet, %d not full)",
len(results),
totalVolumes,
skippedAlreadyEC,
skippedTooSmall,
skippedCollectionFilter,
skippedQuietTime,
skippedFullness,
)
}
// Aggregate summary event with counters and the active thresholds attached.
if err := sender.SendActivity(buildDetectorActivity("decision_summary", summaryMessage, map[string]*plugin_pb.ConfigValue{
"total_volumes": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(totalVolumes)},
},
"selected_tasks": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(results))},
},
"skipped_already_ec": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedAlreadyEC)},
},
"skipped_too_small": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedTooSmall)},
},
"skipped_filtered": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedCollectionFilter)},
},
"skipped_not_quiet": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedQuietTime)},
},
"skipped_not_full": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedFullness)},
},
"quiet_for_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(taskConfig.QuietForSeconds)},
},
"min_size_mb": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(taskConfig.MinSizeMB)},
},
"fullness_threshold_percent": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: taskConfig.FullnessRatio * 100},
},
})); err != nil {
return err
}
// Per-volume detail events, capped at three to bound trace volume.
// Note: these iterate raw metrics, not the deduplicated volume groups.
detailsEmitted := 0
for _, metric := range metrics {
if metric == nil || metric.IsECVolume {
continue
}
sizeMB := float64(metric.Size) / (1024 * 1024)
message := fmt.Sprintf(
"ERASURE CODING: Volume %d: size=%.1fMB (need ≥%dMB), age=%s (need ≥%s), fullness=%.1f%% (need ≥%.1f%%)",
metric.VolumeID,
sizeMB,
taskConfig.MinSizeMB,
metric.Age.Truncate(time.Minute),
quietThreshold.Truncate(time.Minute),
metric.FullnessRatio*100,
taskConfig.FullnessRatio*100,
)
if err := sender.SendActivity(buildDetectorActivity("decision_volume", message, map[string]*plugin_pb.ConfigValue{
"volume_id": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(metric.VolumeID)},
},
"size_mb": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: sizeMB},
},
"required_min_size_mb": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(taskConfig.MinSizeMB)},
},
"age_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(metric.Age.Seconds())},
},
"required_quiet_for_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(taskConfig.QuietForSeconds)},
},
"fullness_percent": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: metric.FullnessRatio * 100},
},
"required_fullness_percent": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: taskConfig.FullnessRatio * 100},
},
})); err != nil {
return err
}
detailsEmitted++
if detailsEmitted >= 3 {
break
}
}
return nil
}
// Execute runs a single erasure-coding job: it decodes and normalizes the
// task params, validates source and targets, streams progress updates through
// sender while the task runs, and reports either failure or completion.
// Progress/failure sends are best-effort (errors ignored); the returned error
// reflects validation or task-execution failure.
func (h *ErasureCodingHandler) Execute(
ctx context.Context,
request *plugin_pb.ExecuteJobRequest,
sender ExecutionSender,
) error {
if request == nil || request.Job == nil {
return fmt.Errorf("execute request/job is nil")
}
if sender == nil {
return fmt.Errorf("execution sender is nil")
}
// An empty job type is tolerated; anything else must match this handler.
if request.Job.JobType != "" && request.Job.JobType != "erasure_coding" {
return fmt.Errorf("job type %q is not handled by erasure_coding worker", request.Job.JobType)
}
params, err := decodeErasureCodingTaskParams(request.Job)
if err != nil {
return err
}
// Force local execution defaults (working dir, cleanup, master client).
applyErasureCodingExecutionDefaults(params, request.GetClusterContext())
if len(params.Sources) == 0 || strings.TrimSpace(params.Sources[0].Node) == "" {
return fmt.Errorf("erasure coding source node is required")
}
if len(params.Targets) == 0 {
return fmt.Errorf("erasure coding targets are required")
}
task := erasurecodingtask.NewErasureCodingTask(
request.Job.JobId,
params.Sources[0].Node,
params.VolumeId,
params.Collection,
)
// Relay task progress to the admin as RUNNING updates; send errors are
// deliberately ignored so a flaky stream does not abort the task.
task.SetProgressCallback(func(progress float64, stage string) {
message := fmt.Sprintf("erasure coding progress %.0f%%", progress)
if strings.TrimSpace(stage) != "" {
message = stage
}
_ = sender.SendProgress(&plugin_pb.JobProgressUpdate{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
State: plugin_pb.JobState_JOB_STATE_RUNNING,
ProgressPercent: progress,
Stage: stage,
Message: message,
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity(stage, message),
},
})
})
// Acknowledge acceptance before starting the actual work.
if err := sender.SendProgress(&plugin_pb.JobProgressUpdate{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
State: plugin_pb.JobState_JOB_STATE_ASSIGNED,
ProgressPercent: 0,
Stage: "assigned",
Message: "erasure coding job accepted",
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity("assigned", "erasure coding job accepted"),
},
}); err != nil {
return err
}
if err := task.Execute(ctx, params); err != nil {
// Best-effort failure report; the execution error is still returned.
_ = sender.SendProgress(&plugin_pb.JobProgressUpdate{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
State: plugin_pb.JobState_JOB_STATE_FAILED,
ProgressPercent: 100,
Stage: "failed",
Message: err.Error(),
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity("failed", err.Error()),
},
})
return err
}
sourceNode := params.Sources[0].Node
resultSummary := fmt.Sprintf("erasure coding completed for volume %d across %d targets", params.VolumeId, len(params.Targets))
return sender.SendCompleted(&plugin_pb.JobCompleted{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
Success: true,
Result: &plugin_pb.JobResult{
Summary: resultSummary,
OutputValues: map[string]*plugin_pb.ConfigValue{
"volume_id": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(params.VolumeId)},
},
"source_server": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: sourceNode},
},
"target_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(params.Targets))},
},
},
},
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity("completed", resultSummary),
},
})
}
// collectVolumeMetrics gathers per-volume health metrics and the active
// topology from the given masters, optionally restricted to one collection.
// It delegates to the vacuum handler's implementation to avoid duplicating
// the topology fetch/build logic.
func (h *ErasureCodingHandler) collectVolumeMetrics(
ctx context.Context,
masterAddresses []string,
collectionFilter string,
) ([]*workertypes.VolumeHealthMetrics, *topology.ActiveTopology, error) {
// Reuse the same master topology fetch/build flow used by the vacuum handler.
helper := &VacuumHandler{grpcDialOption: h.grpcDialOption}
return helper.collectVolumeMetrics(ctx, masterAddresses, collectionFilter)
}
// deriveErasureCodingWorkerConfig builds the effective worker-side EC
// configuration from the supplied config values, starting from the task's
// defaults and clamping every field to a sane range: non-negative durations,
// a fullness ratio in [0, 1], and a minimum size of at least 1MB. The
// min_interval default is one hour when unspecified.
func deriveErasureCodingWorkerConfig(values map[string]*plugin_pb.ConfigValue) *erasureCodingWorkerConfig {
	cfg := erasurecodingtask.NewDefaultConfig()

	// Negative durations/intervals make no sense; floor them at zero.
	clampNonNegative := func(v int) int {
		if v < 0 {
			return 0
		}
		return v
	}

	cfg.QuietForSeconds = clampNonNegative(int(readInt64Config(values, "quiet_for_seconds", int64(cfg.QuietForSeconds))))

	// Fullness is a ratio, so confine it to [0, 1].
	ratio := readDoubleConfig(values, "fullness_ratio", cfg.FullnessRatio)
	switch {
	case ratio < 0:
		ratio = 0
	case ratio > 1:
		ratio = 1
	}
	cfg.FullnessRatio = ratio

	// A minimum candidate size below 1MB is meaningless for EC.
	if sizeMB := int(readInt64Config(values, "min_size_mb", int64(cfg.MinSizeMB))); sizeMB >= 1 {
		cfg.MinSizeMB = sizeMB
	} else {
		cfg.MinSizeMB = 1
	}

	return &erasureCodingWorkerConfig{
		TaskConfig:         cfg,
		MinIntervalSeconds: clampNonNegative(int(readInt64Config(values, "min_interval_seconds", 60*60))),
	}
}
// buildErasureCodingProposal converts one detection result into a JobProposal
// that carries the serialized, execution-ready task params plus
// human-readable parameters and labels for the admin UI.
func buildErasureCodingProposal(
	result *workertypes.TaskDetectionResult,
) (*plugin_pb.JobProposal, error) {
	if result == nil {
		return nil, fmt.Errorf("task detection result is nil")
	}
	if result.TypedParams == nil {
		return nil, fmt.Errorf("missing typed params for volume %d", result.VolumeID)
	}

	// Clone before mutating so the detector's own copy stays untouched.
	taskParams := proto.Clone(result.TypedParams).(*worker_pb.TaskParams)
	applyErasureCodingExecutionDefaults(taskParams, nil)
	payload, err := proto.Marshal(taskParams)
	if err != nil {
		return nil, fmt.Errorf("marshal task params: %w", err)
	}

	id := strings.TrimSpace(result.TaskID)
	if id == "" {
		// No detector-provided ID: synthesize a unique one.
		id = fmt.Sprintf("erasure-coding-%d-%d", result.VolumeID, time.Now().UnixNano())
	}

	// Dedupe on volume (plus collection when present) so repeated detections
	// of the same volume collapse into a single pending job.
	dedupe := fmt.Sprintf("erasure_coding:%d", result.VolumeID)
	if result.Collection != "" {
		dedupe = dedupe + ":" + result.Collection
	}

	source := ""
	if len(taskParams.Sources) > 0 {
		source = strings.TrimSpace(taskParams.Sources[0].Node)
	}
	summary := fmt.Sprintf("Erasure code volume %d", result.VolumeID)
	if source != "" {
		summary = fmt.Sprintf("Erasure code volume %d from %s", result.VolumeID, source)
	}

	return &plugin_pb.JobProposal{
		ProposalId: id,
		DedupeKey:  dedupe,
		JobType:    "erasure_coding",
		Priority:   mapTaskPriority(result.Priority),
		Summary:    summary,
		Detail:     strings.TrimSpace(result.Reason),
		Parameters: map[string]*plugin_pb.ConfigValue{
			"task_params_pb": {
				Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: payload},
			},
			"volume_id": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(result.VolumeID)},
			},
			"source_server": {
				Kind: &plugin_pb.ConfigValue_StringValue{StringValue: source},
			},
			"collection": {
				Kind: &plugin_pb.ConfigValue_StringValue{StringValue: result.Collection},
			},
			"target_count": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(taskParams.Targets))},
			},
		},
		Labels: map[string]string{
			"task_type":    "erasure_coding",
			"volume_id":    fmt.Sprintf("%d", result.VolumeID),
			"collection":   result.Collection,
			"source_node":  source,
			"target_count": fmt.Sprintf("%d", len(taskParams.Targets)),
		},
	}, nil
}
// decodeErasureCodingTaskParams reconstructs worker task params from a plugin
// job spec. It prefers the detector-serialized "task_params_pb" payload and
// otherwise assembles params from discrete job parameters (volume_id,
// source_server/server, target_servers/targets, collection, data_shards,
// parity_shards). Shard IDs are distributed round-robin across the
// normalized (trimmed, non-empty) target list.
//
// Fix: the target list is normalized BEFORE shard assignment. Previously
// assignments were computed over the raw list and empty entries were skipped
// afterwards, so any shards mapped to an empty slot were silently dropped.
func decodeErasureCodingTaskParams(job *plugin_pb.JobSpec) (*worker_pb.TaskParams, error) {
	if job == nil {
		return nil, fmt.Errorf("job spec is nil")
	}
	// Fast path: the detector shipped fully-typed params as a protobuf blob.
	if payload := readBytesConfig(job.Parameters, "task_params_pb"); len(payload) > 0 {
		params := &worker_pb.TaskParams{}
		if err := proto.Unmarshal(payload, params); err != nil {
			return nil, fmt.Errorf("unmarshal task_params_pb: %w", err)
		}
		if params.TaskId == "" {
			params.TaskId = job.JobId
		}
		return params, nil
	}
	volumeID := readInt64Config(job.Parameters, "volume_id", 0)
	sourceNode := strings.TrimSpace(readStringConfig(job.Parameters, "source_server", ""))
	if sourceNode == "" {
		// Legacy key fallback.
		sourceNode = strings.TrimSpace(readStringConfig(job.Parameters, "server", ""))
	}
	targetServers := readStringListConfig(job.Parameters, "target_servers")
	if len(targetServers) == 0 {
		targetServers = readStringListConfig(job.Parameters, "targets")
	}
	collection := readStringConfig(job.Parameters, "collection", "")
	dataShards := int32(readInt64Config(job.Parameters, "data_shards", int64(ecstorage.DataShardsCount)))
	if dataShards <= 0 {
		dataShards = ecstorage.DataShardsCount
	}
	parityShards := int32(readInt64Config(job.Parameters, "parity_shards", int64(ecstorage.ParityShardsCount)))
	if parityShards <= 0 {
		parityShards = ecstorage.ParityShardsCount
	}
	totalShards := int(dataShards + parityShards)
	if volumeID <= 0 {
		return nil, fmt.Errorf("missing volume_id in job parameters")
	}
	if sourceNode == "" {
		return nil, fmt.Errorf("missing source_server in job parameters")
	}
	if len(targetServers) == 0 {
		return nil, fmt.Errorf("missing target_servers in job parameters")
	}
	if len(targetServers) < totalShards {
		return nil, fmt.Errorf("insufficient target_servers: got %d, need at least %d", len(targetServers), totalShards)
	}
	// Normalize targets first so every computed shard assignment lands on a
	// real node; an empty entry must not swallow its shards.
	targetNodes := make([]string, 0, len(targetServers))
	for _, server := range targetServers {
		if node := strings.TrimSpace(server); node != "" {
			targetNodes = append(targetNodes, node)
		}
	}
	if len(targetNodes) < totalShards {
		return nil, fmt.Errorf("insufficient non-empty target_servers after normalization: got %d, need at least %d", len(targetNodes), totalShards)
	}
	shardAssignments := assignECShardIDs(totalShards, len(targetNodes))
	targets := make([]*worker_pb.TaskTarget, 0, len(targetNodes))
	for i, node := range targetNodes {
		targets = append(targets, &worker_pb.TaskTarget{
			Node:     node,
			VolumeId: uint32(volumeID),
			ShardIds: shardAssignments[i],
		})
	}
	return &worker_pb.TaskParams{
		TaskId:     job.JobId,
		VolumeId:   uint32(volumeID),
		Collection: collection,
		Sources: []*worker_pb.TaskSource{
			{
				Node:     sourceNode,
				VolumeId: uint32(volumeID),
			},
		},
		Targets: targets,
		TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
			ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
				DataShards:   dataShards,
				ParityShards: parityShards,
			},
		},
	}, nil
}
// applyErasureCodingExecutionDefaults normalizes task params in place before
// execution: it ensures EC params exist with positive shard counts, forces
// the local working directory and source cleanup, picks a master client from
// the cluster context when none is set, and fills in missing per-target
// shard IDs via round-robin assignment. A nil params is a no-op.
func applyErasureCodingExecutionDefaults(
	params *worker_pb.TaskParams,
	clusterContext *plugin_pb.ClusterContext,
) {
	if params == nil {
		return
	}

	ec := params.GetErasureCodingParams()
	if ec == nil {
		ec = &worker_pb.ErasureCodingTaskParams{
			DataShards:   ecstorage.DataShardsCount,
			ParityShards: ecstorage.ParityShardsCount,
		}
		params.TaskParams = &worker_pb.TaskParams_ErasureCodingParams{ErasureCodingParams: ec}
	}
	if ec.DataShards <= 0 {
		ec.DataShards = ecstorage.DataShardsCount
	}
	if ec.ParityShards <= 0 {
		ec.ParityShards = ecstorage.ParityShardsCount
	}

	// Execution is always local: override whatever the job carried.
	ec.WorkingDir = defaultErasureCodingWorkingDir()
	ec.CleanupSource = true

	if clusterContext != nil && len(clusterContext.MasterGrpcAddresses) > 0 && strings.TrimSpace(ec.MasterClient) == "" {
		ec.MasterClient = clusterContext.MasterGrpcAddresses[0]
	}

	total := int(ec.DataShards + ec.ParityShards)
	if total <= 0 {
		total = ecstorage.TotalShardsCount
	}

	// Only compute assignments when at least one target lacks shard IDs.
	missing := false
	for _, target := range params.Targets {
		if target == nil || len(target.ShardIds) == 0 {
			missing = true
			break
		}
	}
	if !missing || len(params.Targets) == 0 {
		return
	}
	assignments := assignECShardIDs(total, len(params.Targets))
	for i, target := range params.Targets {
		if target != nil && len(target.ShardIds) == 0 {
			target.ShardIds = assignments[i]
		}
	}
}
// readStringListConfig extracts a normalized (trimmed, de-duplicated,
// empty-free) string list from the named config field. It accepts a native
// string list, a generic value list whose items are convertible to text, or
// a single comma-separated string; any other kind (or a missing field)
// yields nil.
func readStringListConfig(values map[string]*plugin_pb.ConfigValue, field string) []string {
	if values == nil {
		return nil
	}
	value, ok := values[field]
	if !ok || value == nil {
		return nil
	}
	switch kind := value.Kind.(type) {
	case *plugin_pb.ConfigValue_StringList:
		return normalizeStringList(kind.StringList.GetValues())
	case *plugin_pb.ConfigValue_ListValue:
		items := kind.ListValue.GetValues()
		collected := make([]string, 0, len(items))
		for _, item := range items {
			if text := readStringFromConfigValue(item); text != "" {
				collected = append(collected, text)
			}
		}
		return normalizeStringList(collected)
	case *plugin_pb.ConfigValue_StringValue:
		return normalizeStringList(strings.Split(kind.StringValue, ","))
	default:
		return nil
	}
}
// readStringFromConfigValue renders a scalar config value as text: strings
// are trimmed, integers and doubles are formatted, booleans become
// "true"/"false", and anything else (including nil) is the empty string.
func readStringFromConfigValue(value *plugin_pb.ConfigValue) string {
	if value == nil {
		return ""
	}
	switch kind := value.Kind.(type) {
	case *plugin_pb.ConfigValue_StringValue:
		return strings.TrimSpace(kind.StringValue)
	case *plugin_pb.ConfigValue_Int64Value:
		return fmt.Sprintf("%d", kind.Int64Value)
	case *plugin_pb.ConfigValue_DoubleValue:
		return fmt.Sprintf("%g", kind.DoubleValue)
	case *plugin_pb.ConfigValue_BoolValue:
		// %t renders exactly "true" or "false".
		return fmt.Sprintf("%t", kind.BoolValue)
	default:
		return ""
	}
}
// normalizeStringList trims every entry, drops empty strings, and removes
// duplicates while preserving first-seen order. The result is never nil.
func normalizeStringList(values []string) []string {
	out := make([]string, 0, len(values))
	index := make(map[string]struct{}, len(values))
	for _, raw := range values {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}
		if _, dup := index[trimmed]; !dup {
			index[trimmed] = struct{}{}
			out = append(out, trimmed)
		}
	}
	return out
}
// assignECShardIDs spreads shard IDs 0..totalShards-1 across targetCount
// targets round-robin, so target i receives shards i, i+targetCount, and so
// on. A non-positive totalShards falls back to the standard total shard
// count; a non-positive targetCount yields nil.
func assignECShardIDs(totalShards int, targetCount int) [][]uint32 {
	if targetCount <= 0 {
		return nil
	}
	if totalShards <= 0 {
		totalShards = ecstorage.TotalShardsCount
	}
	out := make([][]uint32, targetCount)
	for shard := 0; shard < totalShards; shard++ {
		slot := shard % targetCount
		out[slot] = append(out[slot], uint32(shard))
	}
	return out
}
func defaultErasureCodingWorkingDir() string {
return filepath.Join(os.TempDir(), "seaweedfs-ec")
}

View File

@@ -0,0 +1,329 @@
package pluginworker
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
ecstorage "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
erasurecodingtask "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// TestDecodeErasureCodingTaskParamsFromPayload verifies that a serialized
// "task_params_pb" payload is decoded verbatim: the payload takes precedence
// over discrete parameters, and its existing TaskId is kept (the job ID is
// only applied when the payload's TaskId is empty).
func TestDecodeErasureCodingTaskParamsFromPayload(t *testing.T) {
expected := &worker_pb.TaskParams{
TaskId: "task-ec-1",
VolumeId: 88,
Collection: "images",
Sources: []*worker_pb.TaskSource{
{
Node: "10.0.0.1:8080",
VolumeId: 88,
},
},
Targets: []*worker_pb.TaskTarget{
{
Node: "10.0.0.2:8080",
VolumeId: 88,
ShardIds: []uint32{0, 10},
},
},
TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
DataShards: ecstorage.DataShardsCount,
ParityShards: ecstorage.ParityShardsCount,
WorkingDir: "/tmp/ec-work",
CleanupSource: true,
},
},
}
payload, err := proto.Marshal(expected)
if err != nil {
t.Fatalf("marshal payload: %v", err)
}
job := &plugin_pb.JobSpec{
JobId: "job-from-admin",
Parameters: map[string]*plugin_pb.ConfigValue{
"task_params_pb": {Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: payload}},
},
}
actual, err := decodeErasureCodingTaskParams(job)
if err != nil {
t.Fatalf("decodeErasureCodingTaskParams() err = %v", err)
}
// proto.Equal gives a semantic comparison independent of internal state.
if !proto.Equal(expected, actual) {
t.Fatalf("decoded params mismatch\nexpected: %+v\nactual: %+v", expected, actual)
}
}
// TestDecodeErasureCodingTaskParamsFallback verifies reconstruction from
// discrete parameters when no serialized payload is present: task/volume/
// collection fields, a single source, one target per shard, and default EC
// params are all derived from the job spec.
func TestDecodeErasureCodingTaskParamsFallback(t *testing.T) {
// Synthesize TotalShardsCount distinct target hosts; the letter suffix
// ('a', 'b', ...) only needs to make each address unique.
targetServers := make([]string, 0, ecstorage.TotalShardsCount)
for i := 0; i < ecstorage.TotalShardsCount; i++ {
targetServers = append(targetServers, "10.0.0."+string(rune('a'+i))+":8080")
}
job := &plugin_pb.JobSpec{
JobId: "job-ec-2",
Parameters: map[string]*plugin_pb.ConfigValue{
"volume_id": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 7},
},
"source_server": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "127.0.0.1:8080"},
},
"target_servers": {
Kind: &plugin_pb.ConfigValue_StringList{
StringList: &plugin_pb.StringList{Values: targetServers},
},
},
"collection": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "videos"},
},
},
}
params, err := decodeErasureCodingTaskParams(job)
if err != nil {
t.Fatalf("decodeErasureCodingTaskParams() err = %v", err)
}
if params.TaskId != "job-ec-2" || params.VolumeId != 7 || params.Collection != "videos" {
t.Fatalf("unexpected basic params: %+v", params)
}
if len(params.Sources) != 1 || params.Sources[0].Node != "127.0.0.1:8080" {
t.Fatalf("unexpected sources: %+v", params.Sources)
}
if len(params.Targets) != ecstorage.TotalShardsCount {
t.Fatalf("unexpected target count: %d", len(params.Targets))
}
if params.GetErasureCodingParams() == nil {
t.Fatalf("expected fallback erasure coding params")
}
}
// TestDeriveErasureCodingWorkerConfig verifies that explicitly supplied
// config values override the task defaults for all four tunable fields.
func TestDeriveErasureCodingWorkerConfig(t *testing.T) {
values := map[string]*plugin_pb.ConfigValue{
"quiet_for_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 720},
},
"fullness_ratio": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.92},
},
"min_size_mb": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 128},
},
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 55},
},
}
cfg := deriveErasureCodingWorkerConfig(values)
if cfg.TaskConfig.QuietForSeconds != 720 {
t.Fatalf("expected quiet_for_seconds 720, got %d", cfg.TaskConfig.QuietForSeconds)
}
if cfg.TaskConfig.FullnessRatio != 0.92 {
t.Fatalf("expected fullness_ratio 0.92, got %v", cfg.TaskConfig.FullnessRatio)
}
if cfg.TaskConfig.MinSizeMB != 128 {
t.Fatalf("expected min_size_mb 128, got %d", cfg.TaskConfig.MinSizeMB)
}
if cfg.MinIntervalSeconds != 55 {
t.Fatalf("expected min_interval_seconds 55, got %d", cfg.MinIntervalSeconds)
}
}
// TestBuildErasureCodingProposal verifies that a detection result with typed
// params becomes a proposal carrying the serialized params, the correct job
// type, and the source-node label taken from the params (not the result's
// Server field).
func TestBuildErasureCodingProposal(t *testing.T) {
params := &worker_pb.TaskParams{
TaskId: "ec-task-1",
VolumeId: 99,
Collection: "c1",
Sources: []*worker_pb.TaskSource{
{
Node: "source-a:8080",
VolumeId: 99,
},
},
Targets: []*worker_pb.TaskTarget{
{
Node: "target-a:8080",
VolumeId: 99,
ShardIds: []uint32{0, 10},
},
{
Node: "target-b:8080",
VolumeId: 99,
ShardIds: []uint32{1, 11},
},
},
TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
DataShards: ecstorage.DataShardsCount,
ParityShards: ecstorage.ParityShardsCount,
},
},
}
result := &workertypes.TaskDetectionResult{
TaskID: "ec-task-1",
TaskType: workertypes.TaskTypeErasureCoding,
VolumeID: 99,
Server: "source-a",
Collection: "c1",
Priority: workertypes.TaskPriorityLow,
Reason: "quiet and full",
TypedParams: params,
}
proposal, err := buildErasureCodingProposal(result)
if err != nil {
t.Fatalf("buildErasureCodingProposal() err = %v", err)
}
if proposal.JobType != "erasure_coding" {
t.Fatalf("unexpected job type %q", proposal.JobType)
}
if proposal.Parameters["task_params_pb"] == nil {
t.Fatalf("expected serialized task params")
}
// The label comes from params.Sources[0].Node, not result.Server.
if proposal.Labels["source_node"] != "source-a:8080" {
t.Fatalf("unexpected source label %q", proposal.Labels["source_node"])
}
}
// TestErasureCodingHandlerRejectsUnsupportedJobType verifies that both
// Detect and Execute reject requests carrying a foreign job type ("vacuum").
func TestErasureCodingHandlerRejectsUnsupportedJobType(t *testing.T) {
handler := NewErasureCodingHandler(nil)
err := handler.Detect(context.Background(), &plugin_pb.RunDetectionRequest{
JobType: "vacuum",
}, noopDetectionSender{})
if err == nil {
t.Fatalf("expected detect job type mismatch error")
}
err = handler.Execute(context.Background(), &plugin_pb.ExecuteJobRequest{
Job: &plugin_pb.JobSpec{JobId: "job-1", JobType: "vacuum"},
}, noopExecutionSender{})
if err == nil {
t.Fatalf("expected execute job type mismatch error")
}
}
// TestErasureCodingHandlerDetectSkipsByMinInterval verifies the min-interval
// short-circuit: when the last successful run is more recent than
// min_interval_seconds, Detect still succeeds but emits zero proposals, a
// successful completion, and a skip activity mentioning "min interval".
func TestErasureCodingHandlerDetectSkipsByMinInterval(t *testing.T) {
handler := NewErasureCodingHandler(nil)
sender := &recordingDetectionSender{}
// Last run 3s ago with a 10s minimum interval → detection must be skipped.
err := handler.Detect(context.Background(), &plugin_pb.RunDetectionRequest{
JobType: "erasure_coding",
LastSuccessfulRun: timestamppb.New(time.Now().Add(-3 * time.Second)),
WorkerConfigValues: map[string]*plugin_pb.ConfigValue{
"min_interval_seconds": {Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 10}},
},
}, sender)
if err != nil {
t.Fatalf("detect returned err = %v", err)
}
if sender.proposals == nil {
t.Fatalf("expected proposals message")
}
if len(sender.proposals.Proposals) != 0 {
t.Fatalf("expected zero proposals, got %d", len(sender.proposals.Proposals))
}
if sender.complete == nil || !sender.complete.Success {
t.Fatalf("expected successful completion message")
}
if len(sender.events) == 0 {
t.Fatalf("expected detector activity events")
}
if !strings.Contains(sender.events[0].Message, "min interval") {
t.Fatalf("unexpected skip-by-interval message: %q", sender.events[0].Message)
}
}
// TestEmitErasureCodingDetectionDecisionTraceNoTasks verifies the decision
// trace for a run that produced no tasks: a detector-sourced summary event
// reporting all three volumes, followed by per-volume detail events (hence
// at least 4 events for 3 non-EC volumes).
func TestEmitErasureCodingDetectionDecisionTraceNoTasks(t *testing.T) {
sender := &recordingDetectionSender{}
config := erasurecodingtask.NewDefaultConfig()
config.QuietForSeconds = 5 * 60
config.MinSizeMB = 30
config.FullnessRatio = 0.91
// All three volumes are below the size/fullness thresholds.
metrics := []*workertypes.VolumeHealthMetrics{
{
VolumeID: 20,
Size: 0,
Age: 218*time.Hour + 41*time.Minute,
FullnessRatio: 0,
},
{
VolumeID: 27,
Size: uint64(16 * 1024 * 1024 / 10),
Age: 91*time.Hour + time.Minute,
FullnessRatio: 0.002,
},
{
VolumeID: 12,
Size: 0,
Age: 219*time.Hour + 49*time.Minute,
FullnessRatio: 0,
},
}
if err := emitErasureCodingDetectionDecisionTrace(sender, metrics, config, nil); err != nil {
t.Fatalf("emitErasureCodingDetectionDecisionTrace error: %v", err)
}
if len(sender.events) < 4 {
t.Fatalf("expected at least 4 detection events, got %d", len(sender.events))
}
if sender.events[0].Source != plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR {
t.Fatalf("expected detector source, got %v", sender.events[0].Source)
}
if !strings.Contains(sender.events[0].Message, "EC detection: No tasks created for 3 volumes") {
t.Fatalf("unexpected summary message: %q", sender.events[0].Message)
}
// Detail events follow metric slice order, so volume 20 comes first.
if !strings.Contains(sender.events[1].Message, "ERASURE CODING: Volume 20: size=0.0MB") {
t.Fatalf("unexpected first detail message: %q", sender.events[1].Message)
}
}
// TestErasureCodingDescriptorOmitsLocalExecutionFields verifies that the
// worker config form does not expose execution-local fields (working_dir,
// cleanup_source), which the worker always forces at execution time.
func TestErasureCodingDescriptorOmitsLocalExecutionFields(t *testing.T) {
descriptor := NewErasureCodingHandler(nil).Descriptor()
if descriptor == nil || descriptor.WorkerConfigForm == nil {
t.Fatalf("expected worker config form in descriptor")
}
if workerConfigFormHasField(descriptor.WorkerConfigForm, "working_dir") {
t.Fatalf("unexpected working_dir in erasure coding worker config form")
}
if workerConfigFormHasField(descriptor.WorkerConfigForm, "cleanup_source") {
t.Fatalf("unexpected cleanup_source in erasure coding worker config form")
}
}
// TestApplyErasureCodingExecutionDefaultsForcesLocalFields verifies that
// execution defaults override job-supplied local fields: the working dir is
// replaced by the local default and cleanup_source is forced to true.
func TestApplyErasureCodingExecutionDefaultsForcesLocalFields(t *testing.T) {
params := &worker_pb.TaskParams{
TaskId: "ec-test",
VolumeId: 100,
TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
ErasureCodingParams: &worker_pb.ErasureCodingTaskParams{
DataShards: ecstorage.DataShardsCount,
ParityShards: ecstorage.ParityShardsCount,
WorkingDir: "/tmp/custom-from-job",
CleanupSource: false,
},
},
}
applyErasureCodingExecutionDefaults(params, nil)
ecParams := params.GetErasureCodingParams()
if ecParams == nil {
t.Fatalf("expected erasure coding params")
}
if ecParams.WorkingDir != defaultErasureCodingWorkingDir() {
t.Fatalf("expected local working_dir %q, got %q", defaultErasureCodingWorkingDir(), ecParams.WorkingDir)
}
if !ecParams.CleanupSource {
t.Fatalf("expected cleanup_source true")
}
}

View File

@@ -0,0 +1,870 @@
package pluginworker
import (
"context"
"fmt"
"sort"
"strconv"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/topology"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
vacuumtask "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
// defaultVacuumTaskBatchSize bounds how many items a single vacuum task
// batch covers. NOTE(review): not referenced within this chunk; confirm
// intended use at the call sites.
defaultVacuumTaskBatchSize = int32(1000)
)
// VacuumHandler is the plugin job handler for the "vacuum" job type: it
// detects volumes with a high garbage ratio and executes vacuum jobs.
type VacuumHandler struct {
// grpcDialOption is the gRPC dial option used for cluster RPCs
// (e.g. master topology reads in collectVolumeMetrics).
grpcDialOption grpc.DialOption
}
// NewVacuumHandler returns a vacuum job handler that dials cluster peers
// with the given gRPC option.
func NewVacuumHandler(grpcDialOption grpc.DialOption) *VacuumHandler {
return &VacuumHandler{grpcDialOption: grpcDialOption}
}
// Capability advertises the vacuum job type to the admin: the handler can
// both detect and execute, with at most one concurrent detection and two
// concurrent executions per worker.
func (h *VacuumHandler) Capability() *plugin_pb.JobTypeCapability {
return &plugin_pb.JobTypeCapability{
JobType: "vacuum",
CanDetect: true,
CanExecute: true,
MaxDetectionConcurrency: 1,
MaxExecutionConcurrency: 2,
DisplayName: "Volume Vacuum",
Description: "Reclaims disk space by removing deleted files from volumes",
}
}
// Descriptor returns the static UI/config descriptor for the vacuum job
// type: the admin-side form (detection scope), the worker-side form
// (thresholds), scheduler runtime defaults, and default config values.
func (h *VacuumHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
return &plugin_pb.JobTypeDescriptor{
JobType: "vacuum",
DisplayName: "Volume Vacuum",
Description: "Detect and vacuum volumes with high garbage ratio",
Icon: "fas fa-broom",
DescriptorVersion: 1,
// Admin-side form: restricts which collections detection scans.
AdminConfigForm: &plugin_pb.ConfigForm{
FormId: "vacuum-admin",
Title: "Vacuum Admin Config",
Description: "Admin-side controls for vacuum detection scope.",
Sections: []*plugin_pb.ConfigSection{
{
SectionId: "scope",
Title: "Scope",
Description: "Optional filter to restrict detection.",
Fields: []*plugin_pb.ConfigField{
{
Name: "collection_filter",
Label: "Collection Filter",
Description: "Only scan this collection when set.",
Placeholder: "all collections",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_STRING,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_TEXT,
},
},
},
},
DefaultValues: map[string]*plugin_pb.ConfigValue{
"collection_filter": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: ""},
},
},
},
// Worker-side form: detection thresholds and timing constraints.
WorkerConfigForm: &plugin_pb.ConfigForm{
FormId: "vacuum-worker",
Title: "Vacuum Worker Config",
Description: "Worker-side vacuum thresholds.",
Sections: []*plugin_pb.ConfigSection{
{
SectionId: "thresholds",
Title: "Thresholds",
Description: "Detection thresholds and timing constraints.",
Fields: []*plugin_pb.ConfigField{
{
Name: "garbage_threshold",
Label: "Garbage Threshold",
Description: "Detect volumes with garbage ratio >= threshold.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_DOUBLE,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0}},
MaxValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 1}},
},
{
Name: "min_volume_age_seconds",
Label: "Min Volume Age (s)",
Description: "Only detect volumes older than this age.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0}},
},
{
Name: "min_interval_seconds",
Label: "Min Interval (s)",
Description: "Minimum interval between vacuum on the same volume.",
FieldType: plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
Widget: plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
Required: true,
MinValue: &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0}},
},
},
},
},
// Defaults: 30% garbage, 1-day minimum age, 7-day re-vacuum interval.
DefaultValues: map[string]*plugin_pb.ConfigValue{
"garbage_threshold": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.3},
},
"min_volume_age_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 24 * 60 * 60},
},
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 7 * 24 * 60 * 60},
},
},
},
// Scheduler policy defaults consumed by the admin runtime.
AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
Enabled: true,
DetectionIntervalSeconds: 2 * 60 * 60,
DetectionTimeoutSeconds: 120,
MaxJobsPerDetection: 200,
GlobalExecutionConcurrency: 16,
PerWorkerExecutionConcurrency: 4,
RetryLimit: 1,
RetryBackoffSeconds: 10,
},
// Kept in sync with WorkerConfigForm.DefaultValues above.
WorkerDefaultValues: map[string]*plugin_pb.ConfigValue{
"garbage_threshold": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.3},
},
"min_volume_age_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 24 * 60 * 60},
},
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 7 * 24 * 60 * 60},
},
},
}
}
// Detect runs vacuum detection for this worker: it derives the worker config,
// optionally short-circuits when the last successful run is within the
// configured minimum interval, loads per-volume metrics from the cluster
// masters, invokes the vacuum detection algorithm, and streams the resulting
// job proposals plus a completion event through sender.
func (h *VacuumHandler) Detect(ctx context.Context, request *plugin_pb.RunDetectionRequest, sender DetectionSender) error {
	if request == nil {
		return fmt.Errorf("run detection request is nil")
	}
	if sender == nil {
		return fmt.Errorf("detection sender is nil")
	}
	// An empty job type is accepted; any other non-"vacuum" value is a
	// routing error from the admin side.
	if request.JobType != "" && request.JobType != "vacuum" {
		return fmt.Errorf("job type %q is not handled by vacuum worker", request.JobType)
	}
	workerConfig := deriveVacuumConfig(request.GetWorkerConfigValues())
	if shouldSkipDetectionByInterval(request.GetLastSuccessfulRun(), workerConfig.MinIntervalSeconds) {
		// Too soon since the last successful run: report the skip as an
		// activity, then still emit an empty proposal batch and a successful
		// completion so the admin scheduler sees a finished run.
		minInterval := time.Duration(workerConfig.MinIntervalSeconds) * time.Second
		// Best-effort activity; a send failure here must not abort the run.
		_ = sender.SendActivity(buildDetectorActivity(
			"skipped_by_interval",
			fmt.Sprintf("VACUUM: Detection skipped due to min interval (%s)", minInterval),
			map[string]*plugin_pb.ConfigValue{
				"min_interval_seconds": {
					Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(workerConfig.MinIntervalSeconds)},
				},
			},
		))
		if err := sender.SendProposals(&plugin_pb.DetectionProposals{
			JobType:   "vacuum",
			Proposals: []*plugin_pb.JobProposal{},
			HasMore:   false,
		}); err != nil {
			return err
		}
		return sender.SendComplete(&plugin_pb.DetectionComplete{
			JobType:        "vacuum",
			Success:        true,
			TotalProposals: 0,
		})
	}
	// Admin-side config may narrow detection to a single collection.
	collectionFilter := strings.TrimSpace(readStringConfig(request.GetAdminConfigValues(), "collection_filter", ""))
	masters := make([]string, 0)
	if request.ClusterContext != nil {
		masters = append(masters, request.ClusterContext.MasterGrpcAddresses...)
	}
	metrics, activeTopology, err := h.collectVolumeMetrics(ctx, masters, collectionFilter)
	if err != nil {
		return err
	}
	clusterInfo := &workertypes.ClusterInfo{ActiveTopology: activeTopology}
	results, err := vacuumtask.Detection(metrics, clusterInfo, workerConfig)
	if err != nil {
		return err
	}
	// The decision trace is informational only; log and continue on failure.
	if traceErr := emitVacuumDetectionDecisionTrace(sender, metrics, workerConfig, results); traceErr != nil {
		glog.Warningf("Plugin worker failed to emit vacuum detection trace: %v", traceErr)
	}
	// Honor the admin-requested cap on proposals and flag truncation.
	maxResults := int(request.MaxResults)
	hasMore := false
	if maxResults > 0 && len(results) > maxResults {
		hasMore = true
		results = results[:maxResults]
	}
	proposals := make([]*plugin_pb.JobProposal, 0, len(results))
	for _, result := range results {
		proposal, proposalErr := buildVacuumProposal(result)
		if proposalErr != nil {
			// Skip malformed results rather than failing the whole run.
			glog.Warningf("Plugin worker skip invalid vacuum proposal: %v", proposalErr)
			continue
		}
		proposals = append(proposals, proposal)
	}
	if err := sender.SendProposals(&plugin_pb.DetectionProposals{
		JobType:   "vacuum",
		Proposals: proposals,
		HasMore:   hasMore,
	}); err != nil {
		return err
	}
	return sender.SendComplete(&plugin_pb.DetectionComplete{
		JobType:        "vacuum",
		Success:        true,
		TotalProposals: int32(len(proposals)),
	})
}
// emitVacuumDetectionDecisionTrace streams detector activity events that
// explain the outcome of a vacuum detection pass: one summary event with
// aggregate skip/selection counts, followed by per-volume detail events for
// at most the first 3 volumes. A nil sender or workerConfig is a no-op.
// Returns the first SendActivity error encountered, if any.
func emitVacuumDetectionDecisionTrace(
	sender DetectionSender,
	metrics []*workertypes.VolumeHealthMetrics,
	workerConfig *vacuumtask.Config,
	results []*workertypes.TaskDetectionResult,
) error {
	if sender == nil || workerConfig == nil {
		return nil
	}
	minVolumeAge := time.Duration(workerConfig.MinVolumeAgeSeconds) * time.Second
	totalVolumes := len(metrics)
	skippedDueToGarbage := 0
	skippedDueToAge := 0
	for _, metric := range metrics {
		if metric == nil {
			continue
		}
		if metric.GarbageRatio >= workerConfig.GarbageThreshold && metric.Age >= minVolumeAge {
			// Volume qualifies for vacuum; it is not a skip.
			continue
		}
		// Bug fix: these counters used to be gated behind a "debugCount < 5"
		// cap, so the "Skipped: N" totals in the summary under-reported on
		// clusters with more than 5 skipped volumes. Count every skipped
		// volume; one failing both criteria is counted in both buckets.
		if metric.GarbageRatio < workerConfig.GarbageThreshold {
			skippedDueToGarbage++
		}
		if metric.Age < minVolumeAge {
			skippedDueToAge++
		}
	}
	summaryMessage := ""
	summaryStage := "decision_summary"
	if len(results) == 0 {
		summaryMessage = fmt.Sprintf(
			"VACUUM: No tasks created for %d volumes. Threshold=%.2f%%, MinAge=%s. Skipped: %d (garbage<threshold), %d (age<minimum)",
			totalVolumes,
			workerConfig.GarbageThreshold*100,
			minVolumeAge,
			skippedDueToGarbage,
			skippedDueToAge,
		)
	} else {
		summaryMessage = fmt.Sprintf(
			"VACUUM: Created %d task(s) from %d volumes. Threshold=%.2f%%, MinAge=%s",
			len(results),
			totalVolumes,
			workerConfig.GarbageThreshold*100,
			minVolumeAge,
		)
	}
	if err := sender.SendActivity(buildDetectorActivity(summaryStage, summaryMessage, map[string]*plugin_pb.ConfigValue{
		"total_volumes": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(totalVolumes)},
		},
		"selected_tasks": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(results))},
		},
		"garbage_threshold_percent": {
			Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: workerConfig.GarbageThreshold * 100},
		},
		"min_volume_age_seconds": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(workerConfig.MinVolumeAgeSeconds)},
		},
		"skipped_garbage": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedDueToGarbage)},
		},
		"skipped_age": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(skippedDueToAge)},
		},
	})); err != nil {
		return err
	}
	// Emit per-volume detail for at most the first 3 volumes to keep the
	// activity stream small.
	limit := 3
	if len(metrics) < limit {
		limit = len(metrics)
	}
	for i := 0; i < limit; i++ {
		metric := metrics[i]
		if metric == nil {
			continue
		}
		message := fmt.Sprintf(
			"VACUUM: Volume %d: garbage=%.2f%% (need ≥%.2f%%), age=%s (need ≥%s)",
			metric.VolumeID,
			metric.GarbageRatio*100,
			workerConfig.GarbageThreshold*100,
			metric.Age.Truncate(time.Minute),
			minVolumeAge.Truncate(time.Minute),
		)
		if err := sender.SendActivity(buildDetectorActivity("decision_volume", message, map[string]*plugin_pb.ConfigValue{
			"volume_id": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(metric.VolumeID)},
			},
			"garbage_percent": {
				Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: metric.GarbageRatio * 100},
			},
			"required_garbage_percent": {
				Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: workerConfig.GarbageThreshold * 100},
			},
			"age_seconds": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(metric.Age.Seconds())},
			},
			"required_age_seconds": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(minVolumeAge.Seconds())},
			},
		})); err != nil {
			return err
		}
	}
	return nil
}
// Execute runs a single vacuum job: it decodes task parameters from the job
// spec, backfills a garbage threshold from worker config when the decoded
// params lack one, streams progress/activity updates through sender while the
// task runs, and finishes with either a failed progress update or a
// completion message carrying the result summary.
func (h *VacuumHandler) Execute(ctx context.Context, request *plugin_pb.ExecuteJobRequest, sender ExecutionSender) error {
	if request == nil || request.Job == nil {
		return fmt.Errorf("execute request/job is nil")
	}
	if sender == nil {
		return fmt.Errorf("execution sender is nil")
	}
	// An empty job type is accepted; anything else must be "vacuum".
	if request.Job.JobType != "" && request.Job.JobType != "vacuum" {
		return fmt.Errorf("job type %q is not handled by vacuum worker", request.Job.JobType)
	}
	params, err := decodeVacuumTaskParams(request.Job)
	if err != nil {
		return err
	}
	if len(params.Sources) == 0 || strings.TrimSpace(params.Sources[0].Node) == "" {
		return fmt.Errorf("vacuum task source node is required")
	}
	workerConfig := deriveVacuumConfig(request.GetWorkerConfigValues())
	if vacuumParams := params.GetVacuumParams(); vacuumParams != nil {
		// A non-positive threshold means "not set"; fall back to worker config.
		if vacuumParams.GarbageThreshold <= 0 {
			vacuumParams.GarbageThreshold = workerConfig.GarbageThreshold
		}
	} else {
		// No vacuum params at all: synthesize defaults from worker config.
		params.TaskParams = &worker_pb.TaskParams_VacuumParams{
			VacuumParams: &worker_pb.VacuumTaskParams{
				GarbageThreshold: workerConfig.GarbageThreshold,
				BatchSize:        defaultVacuumTaskBatchSize,
				VerifyChecksum:   true,
			},
		}
	}
	task := vacuumtask.NewVacuumTask(
		request.Job.JobId,
		params.Sources[0].Node,
		params.VolumeId,
		params.Collection,
	)
	// Relay task progress to the admin as RUNNING updates. Send errors are
	// deliberately ignored here: progress reporting is best-effort.
	task.SetProgressCallback(func(progress float64, stage string) {
		message := fmt.Sprintf("vacuum progress %.0f%%", progress)
		if strings.TrimSpace(stage) != "" {
			message = stage
		}
		_ = sender.SendProgress(&plugin_pb.JobProgressUpdate{
			JobId:           request.Job.JobId,
			JobType:         request.Job.JobType,
			State:           plugin_pb.JobState_JOB_STATE_RUNNING,
			ProgressPercent: progress,
			Stage:           stage,
			Message:         message,
			Activities: []*plugin_pb.ActivityEvent{
				buildExecutorActivity(stage, message),
			},
		})
	})
	// Acknowledge acceptance before executing; a failure to send here aborts
	// the job so the admin does not wait on a silently-running worker.
	if err := sender.SendProgress(&plugin_pb.JobProgressUpdate{
		JobId:           request.Job.JobId,
		JobType:         request.Job.JobType,
		State:           plugin_pb.JobState_JOB_STATE_ASSIGNED,
		ProgressPercent: 0,
		Stage:           "assigned",
		Message:         "vacuum job accepted",
		Activities: []*plugin_pb.ActivityEvent{
			buildExecutorActivity("assigned", "vacuum job accepted"),
		},
	}); err != nil {
		return err
	}
	if err := task.Execute(ctx, params); err != nil {
		// Report failure (best-effort) and surface the execution error.
		_ = sender.SendProgress(&plugin_pb.JobProgressUpdate{
			JobId:           request.Job.JobId,
			JobType:         request.Job.JobType,
			State:           plugin_pb.JobState_JOB_STATE_FAILED,
			ProgressPercent: 100,
			Stage:           "failed",
			Message:         err.Error(),
			Activities: []*plugin_pb.ActivityEvent{
				buildExecutorActivity("failed", err.Error()),
			},
		})
		return err
	}
	resultSummary := fmt.Sprintf("vacuum completed for volume %d", params.VolumeId)
	return sender.SendCompleted(&plugin_pb.JobCompleted{
		JobId:   request.Job.JobId,
		JobType: request.Job.JobType,
		Success: true,
		Result: &plugin_pb.JobResult{
			Summary: resultSummary,
			OutputValues: map[string]*plugin_pb.ConfigValue{
				"volume_id": {
					Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(params.VolumeId)},
				},
				"server": {
					Kind: &plugin_pb.ConfigValue_StringValue{StringValue: params.Sources[0].Node},
				},
			},
		},
		Activities: []*plugin_pb.ActivityEvent{
			buildExecutorActivity("completed", resultSummary),
		},
	})
}
// collectVolumeMetrics tries each master address in order until one yields a
// usable volume list, and returns per-volume health metrics plus the active
// topology built from that master's response. It errors only when the dial
// option is unset, no masters were provided, or every master attempt fails.
func (h *VacuumHandler) collectVolumeMetrics(
	ctx context.Context,
	masterAddresses []string,
	collectionFilter string,
) ([]*workertypes.VolumeHealthMetrics, *topology.ActiveTopology, error) {
	if h.grpcDialOption == nil {
		return nil, nil, fmt.Errorf("grpc dial option is not configured")
	}
	if len(masterAddresses) == 0 {
		return nil, nil, fmt.Errorf("no master addresses provided in cluster context")
	}
	for _, masterAddress := range masterAddresses {
		response, err := h.fetchVolumeList(ctx, masterAddress)
		if err != nil {
			// Failures are logged and the next master is tried.
			glog.Warningf("Plugin worker failed master volume list at %s: %v", masterAddress, err)
			continue
		}
		metrics, activeTopology, buildErr := buildVolumeMetrics(response, collectionFilter)
		if buildErr != nil {
			glog.Warningf("Plugin worker failed to build metrics from master %s: %v", masterAddress, buildErr)
			continue
		}
		// First master producing a valid topology wins.
		return metrics, activeTopology, nil
	}
	return nil, nil, fmt.Errorf("failed to load topology from all provided masters")
}
// fetchVolumeList calls the master VolumeList RPC, trying each candidate form
// of the address (raw and gRPC-converted) with a bounded dial timeout (5s)
// and call timeout (10s). It returns the first successful response, or the
// last error observed across all candidates.
func (h *VacuumHandler) fetchVolumeList(ctx context.Context, address string) (*master_pb.VolumeListResponse, error) {
	var lastErr error
	for _, candidate := range masterAddressCandidates(address) {
		// Respect caller cancellation between attempts.
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}
		dialCtx, cancelDial := context.WithTimeout(ctx, 5*time.Second)
		conn, err := pb.GrpcDial(dialCtx, candidate, false, h.grpcDialOption)
		cancelDial()
		if err != nil {
			lastErr = err
			continue
		}
		client := master_pb.NewSeaweedClient(conn)
		callCtx, cancelCall := context.WithTimeout(ctx, 10*time.Second)
		response, callErr := client.VolumeList(callCtx, &master_pb.VolumeListRequest{})
		cancelCall()
		// Connection is per-attempt; close before deciding on the result.
		_ = conn.Close()
		if callErr == nil {
			return response, nil
		}
		lastErr = callErr
	}
	// No candidates at all (empty address) leaves lastErr nil; synthesize one.
	if lastErr == nil {
		lastErr = fmt.Errorf("no valid master address candidate")
	}
	return nil, lastErr
}
// deriveVacuumConfig builds a vacuum task config by overlaying worker config
// values on the package defaults. Present values are honored even when zero;
// only absent or unconvertible fields fall back to the defaults.
func deriveVacuumConfig(values map[string]*plugin_pb.ConfigValue) *vacuumtask.Config {
	config := vacuumtask.NewDefaultConfig()
	config.GarbageThreshold = readDoubleConfig(values, "garbage_threshold", config.GarbageThreshold)
	config.MinVolumeAgeSeconds = int(readInt64Config(values, "min_volume_age_seconds", int64(config.MinVolumeAgeSeconds)))
	config.MinIntervalSeconds = int(readInt64Config(values, "min_interval_seconds", int64(config.MinIntervalSeconds)))
	return config
}
// buildVolumeMetrics converts a master volume-list response into per-volume
// health metrics and an ActiveTopology. The topology walk covers every
// datacenter/rack/node/disk/volume; when collectionFilter is non-empty, only
// volumes in that collection are included (replica counts are then computed
// within the filtered set only).
func buildVolumeMetrics(
	response *master_pb.VolumeListResponse,
	collectionFilter string,
) ([]*workertypes.VolumeHealthMetrics, *topology.ActiveTopology, error) {
	if response == nil || response.TopologyInfo == nil {
		return nil, nil, fmt.Errorf("volume list response has no topology info")
	}
	activeTopology := topology.NewActiveTopology(10)
	if err := activeTopology.UpdateTopology(response.TopologyInfo); err != nil {
		return nil, nil, err
	}
	filter := strings.TrimSpace(collectionFilter)
	// Used to compute each volume's fullness ratio against the cluster limit.
	volumeSizeLimitBytes := uint64(response.VolumeSizeLimitMb) * 1024 * 1024
	now := time.Now()
	metrics := make([]*workertypes.VolumeHealthMetrics, 0, 256)
	for _, dc := range response.TopologyInfo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for _, node := range rack.DataNodeInfos {
				for diskType, diskInfo := range node.DiskInfos {
					for _, volume := range diskInfo.VolumeInfos {
						if filter != "" && volume.Collection != filter {
							continue
						}
						metric := &workertypes.VolumeHealthMetrics{
							VolumeID:         volume.Id,
							Server:           node.Id,
							ServerAddress:    node.Address,
							DiskType:         diskType,
							DiskId:           volume.DiskId,
							DataCenter:       dc.Id,
							Rack:             rack.Id,
							Collection:       volume.Collection,
							Size:             volume.Size,
							DeletedBytes:     volume.DeletedByteCount,
							LastModified:     time.Unix(volume.ModifiedAtSecond, 0),
							ReplicaCount:     1, // provisional; recomputed below
							ExpectedReplicas: int(volume.ReplicaPlacement),
							IsReadOnly:       volume.ReadOnly,
						}
						// Garbage ratio: fraction of the volume's bytes that
						// have been deleted (guard against division by zero).
						if metric.Size > 0 {
							metric.GarbageRatio = float64(metric.DeletedBytes) / float64(metric.Size)
						}
						if volumeSizeLimitBytes > 0 {
							metric.FullnessRatio = float64(metric.Size) / float64(volumeSizeLimitBytes)
						}
						// Age is measured from the volume's last modification.
						metric.Age = now.Sub(metric.LastModified)
						metrics = append(metrics, metric)
					}
				}
			}
		}
	}
	// Second pass: count how many replicas of each volume id were observed,
	// then write that count back onto every replica's metric.
	replicaCounts := make(map[uint32]int)
	for _, metric := range metrics {
		replicaCounts[metric.VolumeID]++
	}
	for _, metric := range metrics {
		metric.ReplicaCount = replicaCounts[metric.VolumeID]
	}
	return metrics, activeTopology, nil
}
// buildVacuumProposal converts one detection result into a plugin job
// proposal. The typed task params are serialized into the "task_params_pb"
// parameter so the executor can reconstruct them exactly; volume id, server,
// and collection are duplicated as plain parameters and labels for filtering.
// The dedupe key is "vacuum:<volume>[:<collection>]" so repeated detections
// of the same volume collapse to one pending job.
func buildVacuumProposal(result *workertypes.TaskDetectionResult) (*plugin_pb.JobProposal, error) {
	if result == nil {
		return nil, fmt.Errorf("task detection result is nil")
	}
	if result.TypedParams == nil {
		return nil, fmt.Errorf("missing typed params for volume %d", result.VolumeID)
	}
	paramsPayload, err := proto.Marshal(result.TypedParams)
	if err != nil {
		return nil, fmt.Errorf("marshal task params: %w", err)
	}
	proposalID := strings.TrimSpace(result.TaskID)
	if proposalID == "" {
		// Fall back to a time-based unique id when detection gave no task id.
		proposalID = fmt.Sprintf("vacuum-%d-%d", result.VolumeID, time.Now().UnixNano())
	}
	dedupeKey := fmt.Sprintf("vacuum:%d", result.VolumeID)
	if result.Collection != "" {
		dedupeKey = dedupeKey + ":" + result.Collection
	}
	summary := fmt.Sprintf("Vacuum volume %d", result.VolumeID)
	if strings.TrimSpace(result.Server) != "" {
		summary = summary + " on " + result.Server
	}
	return &plugin_pb.JobProposal{
		ProposalId: proposalID,
		DedupeKey:  dedupeKey,
		JobType:    "vacuum",
		Priority:   mapTaskPriority(result.Priority),
		Summary:    summary,
		Detail:     strings.TrimSpace(result.Reason),
		Parameters: map[string]*plugin_pb.ConfigValue{
			"task_params_pb": {
				Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: paramsPayload},
			},
			"volume_id": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(result.VolumeID)},
			},
			"server": {
				Kind: &plugin_pb.ConfigValue_StringValue{StringValue: result.Server},
			},
			"collection": {
				Kind: &plugin_pb.ConfigValue_StringValue{StringValue: result.Collection},
			},
		},
		Labels: map[string]string{
			"task_type":   "vacuum",
			"volume_id":   fmt.Sprintf("%d", result.VolumeID),
			"collection":  result.Collection,
			"source_node": result.Server,
		},
	}, nil
}
// decodeVacuumTaskParams reconstructs worker task params from a job spec.
// Preferred path: unmarshal the serialized params in "task_params_pb",
// backfilling TaskId from the job id when absent. Fallback path: build
// minimal params from the "volume_id"/"server"/"collection" parameters with
// default vacuum settings (threshold 0.3, checksum verification on).
func decodeVacuumTaskParams(job *plugin_pb.JobSpec) (*worker_pb.TaskParams, error) {
	if job == nil {
		return nil, fmt.Errorf("job spec is nil")
	}
	if payload := readBytesConfig(job.Parameters, "task_params_pb"); len(payload) > 0 {
		params := &worker_pb.TaskParams{}
		if err := proto.Unmarshal(payload, params); err != nil {
			return nil, fmt.Errorf("unmarshal task_params_pb: %w", err)
		}
		if params.TaskId == "" {
			params.TaskId = job.JobId
		}
		return params, nil
	}
	volumeID := readInt64Config(job.Parameters, "volume_id", 0)
	server := readStringConfig(job.Parameters, "server", "")
	collection := readStringConfig(job.Parameters, "collection", "")
	if volumeID <= 0 {
		return nil, fmt.Errorf("missing volume_id in job parameters")
	}
	if strings.TrimSpace(server) == "" {
		return nil, fmt.Errorf("missing server in job parameters")
	}
	return &worker_pb.TaskParams{
		TaskId:     job.JobId,
		VolumeId:   uint32(volumeID),
		Collection: collection,
		Sources: []*worker_pb.TaskSource{
			{
				Node:     server,
				VolumeId: uint32(volumeID),
			},
		},
		TaskParams: &worker_pb.TaskParams_VacuumParams{
			VacuumParams: &worker_pb.VacuumTaskParams{
				// Hard-coded fallback threshold; Execute only overrides a
				// threshold when it is <= 0, so this value is kept as-is.
				GarbageThreshold: 0.3,
				BatchSize:        defaultVacuumTaskBatchSize,
				VerifyChecksum:   true,
			},
		},
	}, nil
}
// readStringConfig returns the configured value for field rendered as a
// string, converting int64, double, and bool kinds to their canonical text
// forms; fallback is returned when the field is absent, nil, or of an
// unconvertible kind.
func readStringConfig(values map[string]*plugin_pb.ConfigValue, field string, fallback string) string {
	if values == nil {
		return fallback
	}
	entry := values[field]
	if entry == nil {
		return fallback
	}
	switch v := entry.Kind.(type) {
	case *plugin_pb.ConfigValue_StringValue:
		return v.StringValue
	case *plugin_pb.ConfigValue_Int64Value:
		return strconv.FormatInt(v.Int64Value, 10)
	case *plugin_pb.ConfigValue_DoubleValue:
		return strconv.FormatFloat(v.DoubleValue, 'f', -1, 64)
	case *plugin_pb.ConfigValue_BoolValue:
		return strconv.FormatBool(v.BoolValue)
	default:
		return fallback
	}
}
// readDoubleConfig returns the configured value for field coerced to float64.
// Int64 values are widened, strings are parsed (after trimming), and bools
// map to 1/0; fallback is returned when the field is absent, nil, a string
// that fails to parse, or of an unconvertible kind.
func readDoubleConfig(values map[string]*plugin_pb.ConfigValue, field string, fallback float64) float64 {
	if values == nil {
		return fallback
	}
	entry := values[field]
	if entry == nil {
		return fallback
	}
	switch v := entry.Kind.(type) {
	case *plugin_pb.ConfigValue_DoubleValue:
		return v.DoubleValue
	case *plugin_pb.ConfigValue_Int64Value:
		return float64(v.Int64Value)
	case *plugin_pb.ConfigValue_StringValue:
		if parsed, err := strconv.ParseFloat(strings.TrimSpace(v.StringValue), 64); err == nil {
			return parsed
		}
		return fallback
	case *plugin_pb.ConfigValue_BoolValue:
		if v.BoolValue {
			return 1
		}
		return 0
	default:
		return fallback
	}
}
// readInt64Config returns the configured value for field coerced to int64.
// Double values are truncated, strings are parsed base-10 (after trimming),
// and bools map to 1/0; fallback is returned when the field is absent, nil,
// a string that fails to parse, or of an unconvertible kind.
func readInt64Config(values map[string]*plugin_pb.ConfigValue, field string, fallback int64) int64 {
	if values == nil {
		return fallback
	}
	entry := values[field]
	if entry == nil {
		return fallback
	}
	switch v := entry.Kind.(type) {
	case *plugin_pb.ConfigValue_Int64Value:
		return v.Int64Value
	case *plugin_pb.ConfigValue_DoubleValue:
		return int64(v.DoubleValue)
	case *plugin_pb.ConfigValue_StringValue:
		if parsed, err := strconv.ParseInt(strings.TrimSpace(v.StringValue), 10, 64); err == nil {
			return parsed
		}
		return fallback
	case *plugin_pb.ConfigValue_BoolValue:
		if v.BoolValue {
			return 1
		}
		return 0
	default:
		return fallback
	}
}
// readBytesConfig returns the raw bytes stored under field, or nil when the
// field is absent, nil, or holds a non-bytes kind.
func readBytesConfig(values map[string]*plugin_pb.ConfigValue, field string) []byte {
	if values == nil {
		return nil
	}
	entry := values[field]
	if entry == nil {
		return nil
	}
	bytesKind, ok := entry.Kind.(*plugin_pb.ConfigValue_BytesValue)
	if !ok {
		return nil
	}
	return bytesKind.BytesValue
}
// mapTaskPriority translates a worker task priority string (case-insensitive)
// into the plugin job priority enum. "medium", "normal", and any unrecognized
// value map to NORMAL.
func mapTaskPriority(priority workertypes.TaskPriority) plugin_pb.JobPriority {
	normalized := strings.ToLower(string(priority))
	if normalized == "low" {
		return plugin_pb.JobPriority_JOB_PRIORITY_LOW
	}
	if normalized == "high" {
		return plugin_pb.JobPriority_JOB_PRIORITY_HIGH
	}
	if normalized == "critical" {
		return plugin_pb.JobPriority_JOB_PRIORITY_CRITICAL
	}
	return plugin_pb.JobPriority_JOB_PRIORITY_NORMAL
}
// masterAddressCandidates returns the sorted, deduplicated set of address
// forms to try for a master: the trimmed input itself plus its gRPC-converted
// form (when different). An empty/blank address yields nil.
func masterAddressCandidates(address string) []string {
	trimmed := strings.TrimSpace(address)
	if trimmed == "" {
		return nil
	}
	candidates := []string{trimmed}
	if grpcAddress := pb.ServerToGrpcAddress(trimmed); grpcAddress != trimmed {
		candidates = append(candidates, grpcAddress)
	}
	sort.Strings(candidates)
	return candidates
}
// shouldSkipDetectionByInterval reports whether detection should be skipped
// because the last successful run happened more recently than the configured
// minimum interval. A nil or zero timestamp, or a non-positive interval,
// never causes a skip.
func shouldSkipDetectionByInterval(lastSuccessfulRun *timestamppb.Timestamp, minIntervalSeconds int) bool {
	if minIntervalSeconds <= 0 || lastSuccessfulRun == nil {
		return false
	}
	last := lastSuccessfulRun.AsTime()
	if last.IsZero() {
		return false
	}
	minInterval := time.Duration(minIntervalSeconds) * time.Second
	return time.Since(last) < minInterval
}
// buildExecutorActivity creates an executor-sourced activity event stamped
// with the current time.
func buildExecutorActivity(stage string, message string) *plugin_pb.ActivityEvent {
	return &plugin_pb.ActivityEvent{
		Source:    plugin_pb.ActivitySource_ACTIVITY_SOURCE_EXECUTOR,
		Stage:     stage,
		Message:   message,
		CreatedAt: timestamppb.Now(),
	}
}
// buildDetectorActivity creates a detector-sourced activity event with
// optional structured details, stamped with the current time.
func buildDetectorActivity(stage string, message string, details map[string]*plugin_pb.ConfigValue) *plugin_pb.ActivityEvent {
	return &plugin_pb.ActivityEvent{
		Source:    plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR,
		Stage:     stage,
		Message:   message,
		Details:   details,
		CreatedAt: timestamppb.Now(),
	}
}

View File

@@ -0,0 +1,277 @@
package pluginworker
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
vacuumtask "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// TestDecodeVacuumTaskParamsFromPayload verifies that a serialized
// worker_pb.TaskParams carried in the "task_params_pb" job parameter
// round-trips through decodeVacuumTaskParams unchanged.
func TestDecodeVacuumTaskParamsFromPayload(t *testing.T) {
	expected := &worker_pb.TaskParams{
		TaskId:     "task-1",
		VolumeId:   42,
		Collection: "photos",
		Sources: []*worker_pb.TaskSource{
			{
				Node:     "10.0.0.1:8080",
				VolumeId: 42,
			},
		},
		TaskParams: &worker_pb.TaskParams_VacuumParams{
			VacuumParams: &worker_pb.VacuumTaskParams{
				GarbageThreshold: 0.33,
				BatchSize:        500,
				VerifyChecksum:   true,
			},
		},
	}
	payload, err := proto.Marshal(expected)
	if err != nil {
		t.Fatalf("marshal payload: %v", err)
	}
	job := &plugin_pb.JobSpec{
		JobId: "job-from-admin",
		Parameters: map[string]*plugin_pb.ConfigValue{
			"task_params_pb": {Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: payload}},
		},
	}
	actual, err := decodeVacuumTaskParams(job)
	if err != nil {
		t.Fatalf("decodeVacuumTaskParams() err = %v", err)
	}
	// TaskId is set in the payload, so no job-id backfill should occur.
	if !proto.Equal(expected, actual) {
		t.Fatalf("decoded params mismatch\nexpected: %+v\nactual: %+v", expected, actual)
	}
}
// TestDecodeVacuumTaskParamsFallback verifies the non-payload path: when no
// "task_params_pb" is present, params are assembled from the volume_id,
// server, and collection parameters with default vacuum settings filled in.
func TestDecodeVacuumTaskParamsFallback(t *testing.T) {
	job := &plugin_pb.JobSpec{
		JobId: "job-2",
		Parameters: map[string]*plugin_pb.ConfigValue{
			"volume_id":  {Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 7}},
			"server":     {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "127.0.0.1:8080"}},
			"collection": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "videos"}},
		},
	}
	params, err := decodeVacuumTaskParams(job)
	if err != nil {
		t.Fatalf("decodeVacuumTaskParams() err = %v", err)
	}
	if params.TaskId != "job-2" || params.VolumeId != 7 || params.Collection != "videos" {
		t.Fatalf("unexpected basic params: %+v", params)
	}
	if len(params.Sources) != 1 || params.Sources[0].Node != "127.0.0.1:8080" {
		t.Fatalf("unexpected sources: %+v", params.Sources)
	}
	if params.GetVacuumParams() == nil {
		t.Fatalf("expected fallback vacuum params")
	}
}
// TestDeriveVacuumConfigAllowsZeroValues verifies that explicitly-configured
// zero values override the defaults rather than being treated as "unset".
func TestDeriveVacuumConfigAllowsZeroValues(t *testing.T) {
	values := map[string]*plugin_pb.ConfigValue{
		"garbage_threshold": {
			Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0},
		},
		"min_volume_age_seconds": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0},
		},
		"min_interval_seconds": {
			Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0},
		},
	}
	cfg := deriveVacuumConfig(values)
	if cfg.GarbageThreshold != 0 {
		t.Fatalf("expected garbage_threshold 0, got %v", cfg.GarbageThreshold)
	}
	if cfg.MinVolumeAgeSeconds != 0 {
		t.Fatalf("expected min_volume_age_seconds 0, got %d", cfg.MinVolumeAgeSeconds)
	}
	if cfg.MinIntervalSeconds != 0 {
		t.Fatalf("expected min_interval_seconds 0, got %d", cfg.MinIntervalSeconds)
	}
}
// TestMasterAddressCandidates verifies that a master address expands to both
// its raw form and its gRPC-port form (port + 10000 by convention here).
func TestMasterAddressCandidates(t *testing.T) {
	candidates := masterAddressCandidates("localhost:9333")
	if len(candidates) != 2 {
		t.Fatalf("expected 2 candidates, got %d: %v", len(candidates), candidates)
	}
	seen := map[string]bool{}
	for _, candidate := range candidates {
		seen[candidate] = true
	}
	if !seen["localhost:9333"] {
		t.Fatalf("expected original address in candidates: %v", candidates)
	}
	if !seen["localhost:19333"] {
		t.Fatalf("expected grpc address in candidates: %v", candidates)
	}
}
// TestShouldSkipDetectionByInterval covers the nil-timestamp, zero-interval,
// recent-run (skip), and old-run (no skip) cases.
func TestShouldSkipDetectionByInterval(t *testing.T) {
	if shouldSkipDetectionByInterval(nil, 10) {
		t.Fatalf("expected false when timestamp is nil")
	}
	if shouldSkipDetectionByInterval(timestamppb.Now(), 0) {
		t.Fatalf("expected false when min interval is zero")
	}
	recent := timestamppb.New(time.Now().Add(-5 * time.Second))
	if !shouldSkipDetectionByInterval(recent, 10) {
		t.Fatalf("expected true for recent successful run")
	}
	old := timestamppb.New(time.Now().Add(-30 * time.Second))
	if shouldSkipDetectionByInterval(old, 10) {
		t.Fatalf("expected false for old successful run")
	}
}
// TestVacuumHandlerRejectsUnsupportedJobType verifies that both Detect and
// Execute reject requests routed with a non-vacuum job type.
func TestVacuumHandlerRejectsUnsupportedJobType(t *testing.T) {
	handler := NewVacuumHandler(nil)
	err := handler.Detect(context.Background(), &plugin_pb.RunDetectionRequest{
		JobType: "balance",
	}, noopDetectionSender{})
	if err == nil {
		t.Fatalf("expected detect job type mismatch error")
	}
	err = handler.Execute(context.Background(), &plugin_pb.ExecuteJobRequest{
		Job: &plugin_pb.JobSpec{JobId: "job-1", JobType: "balance"},
	}, noopExecutionSender{})
	if err == nil {
		t.Fatalf("expected execute job type mismatch error")
	}
}
// TestVacuumHandlerDetectSkipsByMinInterval verifies that a run inside the
// configured min interval still produces an empty proposal batch and a
// successful completion (no error, no proposals).
func TestVacuumHandlerDetectSkipsByMinInterval(t *testing.T) {
	handler := NewVacuumHandler(nil)
	sender := &recordingDetectionSender{}
	err := handler.Detect(context.Background(), &plugin_pb.RunDetectionRequest{
		JobType:           "vacuum",
		LastSuccessfulRun: timestamppb.New(time.Now().Add(-3 * time.Second)),
		WorkerConfigValues: map[string]*plugin_pb.ConfigValue{
			"min_interval_seconds": {Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 10}},
		},
	}, sender)
	if err != nil {
		t.Fatalf("detect returned err = %v", err)
	}
	if sender.proposals == nil {
		t.Fatalf("expected proposals message")
	}
	if len(sender.proposals.Proposals) != 0 {
		t.Fatalf("expected zero proposals, got %d", len(sender.proposals.Proposals))
	}
	if sender.complete == nil || !sender.complete.Success {
		t.Fatalf("expected successful completion message")
	}
}
// TestBuildExecutorActivity verifies the executor activity constructor fills
// in source, stage, message, and a creation timestamp.
func TestBuildExecutorActivity(t *testing.T) {
	activity := buildExecutorActivity("running", "vacuum in progress")
	if activity == nil {
		t.Fatalf("expected non-nil activity")
	}
	if activity.Source != plugin_pb.ActivitySource_ACTIVITY_SOURCE_EXECUTOR {
		t.Fatalf("unexpected source: %v", activity.Source)
	}
	if activity.Stage != "running" {
		t.Fatalf("unexpected stage: %q", activity.Stage)
	}
	if activity.Message != "vacuum in progress" {
		t.Fatalf("unexpected message: %q", activity.Message)
	}
	if activity.CreatedAt == nil {
		t.Fatalf("expected created_at timestamp")
	}
}
// TestEmitVacuumDetectionDecisionTraceNoTasks verifies the no-results trace:
// one detector-sourced summary event followed by per-volume detail events
// (first three volumes), with the expected message contents.
func TestEmitVacuumDetectionDecisionTraceNoTasks(t *testing.T) {
	sender := &recordingDetectionSender{}
	config := vacuumtask.NewDefaultConfig()
	config.GarbageThreshold = 0.3
	config.MinVolumeAgeSeconds = int((24 * time.Hour).Seconds())
	metrics := []*workertypes.VolumeHealthMetrics{
		{
			VolumeID:     17,
			GarbageRatio: 0,
			Age:          218*time.Hour + 23*time.Minute,
		},
		{
			VolumeID:     16,
			GarbageRatio: 0,
			Age:          218*time.Hour + 22*time.Minute,
		},
		{
			VolumeID:     6,
			GarbageRatio: 0,
			Age:          90*time.Hour + 42*time.Minute,
		},
	}
	if err := emitVacuumDetectionDecisionTrace(sender, metrics, config, nil); err != nil {
		t.Fatalf("emitVacuumDetectionDecisionTrace error: %v", err)
	}
	// Summary + 3 per-volume details = 4 events minimum.
	if len(sender.events) < 4 {
		t.Fatalf("expected at least 4 detection events, got %d", len(sender.events))
	}
	if sender.events[0].Source != plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR {
		t.Fatalf("expected detector source, got %v", sender.events[0].Source)
	}
	if !strings.Contains(sender.events[0].Message, "VACUUM: No tasks created for 3 volumes") {
		t.Fatalf("unexpected summary message: %q", sender.events[0].Message)
	}
	if !strings.Contains(sender.events[1].Message, "VACUUM: Volume 17: garbage=0.00%") {
		t.Fatalf("unexpected first detail message: %q", sender.events[1].Message)
	}
}
// noopDetectionSender discards all detection stream messages; used where a
// test only cares about the returned error.
type noopDetectionSender struct{}

func (noopDetectionSender) SendProposals(*plugin_pb.DetectionProposals) error { return nil }
func (noopDetectionSender) SendComplete(*plugin_pb.DetectionComplete) error   { return nil }
func (noopDetectionSender) SendActivity(*plugin_pb.ActivityEvent) error       { return nil }

// noopExecutionSender discards all execution stream messages.
type noopExecutionSender struct{}

func (noopExecutionSender) SendProgress(*plugin_pb.JobProgressUpdate) error { return nil }
func (noopExecutionSender) SendCompleted(*plugin_pb.JobCompleted) error     { return nil }
// recordingDetectionSender captures the last proposals/completion messages
// and every non-nil activity event for assertions in tests.
type recordingDetectionSender struct {
	proposals *plugin_pb.DetectionProposals
	complete  *plugin_pb.DetectionComplete
	events    []*plugin_pb.ActivityEvent
}

func (r *recordingDetectionSender) SendProposals(proposals *plugin_pb.DetectionProposals) error {
	r.proposals = proposals
	return nil
}

func (r *recordingDetectionSender) SendComplete(complete *plugin_pb.DetectionComplete) error {
	r.complete = complete
	return nil
}

func (r *recordingDetectionSender) SendActivity(event *plugin_pb.ActivityEvent) error {
	// Nil events are dropped so tests can index events safely.
	if event != nil {
		r.events = append(r.events, event)
	}
	return nil
}

View File

@@ -0,0 +1,826 @@
package pluginworker
import (
"context"
"fmt"
"sort"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/topology"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
balancetask "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
)
const (
	// defaultBalanceTimeoutSeconds is the default timeout for volume balance
	// work (10 minutes). Its consumers are outside this chunk — confirm usage.
	defaultBalanceTimeoutSeconds = int32(10 * 60)
)

// volumeBalanceWorkerConfig pairs the balance task configuration with the
// worker-side minimum detection interval parsed from worker config values.
type volumeBalanceWorkerConfig struct {
	TaskConfig         *balancetask.Config
	MinIntervalSeconds int
}

// VolumeBalanceHandler is the plugin job handler for volume balancing.
type VolumeBalanceHandler struct {
	// grpcDialOption is used when dialing masters for topology data.
	grpcDialOption grpc.DialOption
}

// NewVolumeBalanceHandler returns a handler that dials masters with the given
// gRPC dial option.
func NewVolumeBalanceHandler(grpcDialOption grpc.DialOption) *VolumeBalanceHandler {
	return &VolumeBalanceHandler{grpcDialOption: grpcDialOption}
}
// Capability advertises this handler to the admin plugin runtime: the
// "volume_balance" job type supports both detection and execution, each
// limited to one concurrent run on this worker.
func (h *VolumeBalanceHandler) Capability() *plugin_pb.JobTypeCapability {
	return &plugin_pb.JobTypeCapability{
		JobType:                 "volume_balance",
		CanDetect:               true,
		CanExecute:              true,
		MaxDetectionConcurrency: 1,
		MaxExecutionConcurrency: 1,
		DisplayName:             "Volume Balance",
		Description:             "Moves volumes between servers to reduce skew in volume distribution",
	}
}
// Descriptor returns the static job-type descriptor for volume balance:
// admin-side config form (collection scope filter), worker-side config form
// (imbalance/server-count/interval thresholds), admin runtime defaults, and
// the worker default values mirroring the form defaults.
func (h *VolumeBalanceHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return &plugin_pb.JobTypeDescriptor{
		JobType:           "volume_balance",
		DisplayName:       "Volume Balance",
		Description:       "Detect and execute volume moves to balance server load",
		Icon:              "fas fa-balance-scale",
		DescriptorVersion: 1,
		// Admin-side form: optional scoping filters applied before detection.
		AdminConfigForm: &plugin_pb.ConfigForm{
			FormId:      "volume-balance-admin",
			Title:       "Volume Balance Admin Config",
			Description: "Admin-side controls for volume balance detection scope.",
			Sections: []*plugin_pb.ConfigSection{
				{
					SectionId:   "scope",
					Title:       "Scope",
					Description: "Optional filters applied before balance detection.",
					Fields: []*plugin_pb.ConfigField{
						{
							Name:        "collection_filter",
							Label:       "Collection Filter",
							Description: "Only detect balance opportunities in this collection when set.",
							Placeholder: "all collections",
							FieldType:   plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_STRING,
							Widget:      plugin_pb.ConfigWidget_CONFIG_WIDGET_TEXT,
						},
					},
				},
			},
			DefaultValues: map[string]*plugin_pb.ConfigValue{
				"collection_filter": {
					Kind: &plugin_pb.ConfigValue_StringValue{StringValue: ""},
				},
			},
		},
		// Worker-side form: thresholds controlling when balance jobs are proposed.
		WorkerConfigForm: &plugin_pb.ConfigForm{
			FormId:      "volume-balance-worker",
			Title:       "Volume Balance Worker Config",
			Description: "Worker-side balance thresholds.",
			Sections: []*plugin_pb.ConfigSection{
				{
					SectionId:   "thresholds",
					Title:       "Detection Thresholds",
					Description: "Controls for when balance jobs should be proposed.",
					Fields: []*plugin_pb.ConfigField{
						{
							Name:        "imbalance_threshold",
							Label:       "Imbalance Threshold",
							Description: "Detect when skew exceeds this ratio.",
							FieldType:   plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_DOUBLE,
							Widget:      plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
							Required:    true,
							MinValue:    &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0}},
							MaxValue:    &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 1}},
						},
						{
							Name:        "min_server_count",
							Label:       "Minimum Server Count",
							Description: "Require at least this many servers for balancing.",
							FieldType:   plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
							Widget:      plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
							Required:    true,
							MinValue:    &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 2}},
						},
						{
							Name:        "min_interval_seconds",
							Label:       "Minimum Detection Interval (s)",
							Description: "Skip detection if the last successful run is more recent than this interval.",
							FieldType:   plugin_pb.ConfigFieldType_CONFIG_FIELD_TYPE_INT64,
							Widget:      plugin_pb.ConfigWidget_CONFIG_WIDGET_NUMBER,
							Required:    true,
							MinValue:    &plugin_pb.ConfigValue{Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 0}},
						},
					},
				},
			},
			DefaultValues: map[string]*plugin_pb.ConfigValue{
				"imbalance_threshold": {
					Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.2},
				},
				"min_server_count": {
					Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 2},
				},
				"min_interval_seconds": {
					Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 30 * 60},
				},
			},
		},
		// Scheduler defaults used by the admin runtime for this job type.
		AdminRuntimeDefaults: &plugin_pb.AdminRuntimeDefaults{
			Enabled:                       true,
			DetectionIntervalSeconds:      30 * 60,
			DetectionTimeoutSeconds:       120,
			MaxJobsPerDetection:           100,
			GlobalExecutionConcurrency:    16,
			PerWorkerExecutionConcurrency: 4,
			RetryLimit:                    1,
			RetryBackoffSeconds:           15,
		},
		// Mirrors the worker form defaults for bootstrap of persisted config.
		WorkerDefaultValues: map[string]*plugin_pb.ConfigValue{
			"imbalance_threshold": {
				Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.2},
			},
			"min_server_count": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 2},
			},
			"min_interval_seconds": {
				Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 30 * 60},
			},
		},
	}
}
// Detect runs volume-balance detection for one RunDetectionRequest and
// streams proposals, activity events, and a completion message via sender.
//
// Behavior:
//   - Rejects a nil request/sender and any job type other than
//     "volume_balance" (an empty job type is tolerated).
//   - Honors the worker-side min_interval_seconds throttle: when the last
//     successful run is too recent, it emits a "skipped_by_interval"
//     activity, an empty proposal batch, and a successful completion.
//   - Otherwise it collects volume metrics from the cluster masters, runs
//     the balance detection algorithm, truncates results to
//     request.MaxResults (flagging HasMore), and sends one proposal per
//     surviving result.
//
// Activity/trace send failures are deliberately ignored or only logged;
// proposal and completion send failures abort the call.
func (h *VolumeBalanceHandler) Detect(
ctx context.Context,
request *plugin_pb.RunDetectionRequest,
sender DetectionSender,
) error {
if request == nil {
return fmt.Errorf("run detection request is nil")
}
if sender == nil {
return fmt.Errorf("detection sender is nil")
}
if request.JobType != "" && request.JobType != "volume_balance" {
return fmt.Errorf("job type %q is not handled by volume_balance worker", request.JobType)
}
workerConfig := deriveBalanceWorkerConfig(request.GetWorkerConfigValues())
if shouldSkipDetectionByInterval(request.GetLastSuccessfulRun(), workerConfig.MinIntervalSeconds) {
minInterval := time.Duration(workerConfig.MinIntervalSeconds) * time.Second
// Best-effort activity; a failed activity send must not fail the skip path.
_ = sender.SendActivity(buildDetectorActivity(
"skipped_by_interval",
fmt.Sprintf("VOLUME BALANCE: Detection skipped due to min interval (%s)", minInterval),
map[string]*plugin_pb.ConfigValue{
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(workerConfig.MinIntervalSeconds)},
},
},
))
// Still send an (empty) proposal batch plus a successful completion so the
// admin-side scheduler observes a well-formed detection round.
if err := sender.SendProposals(&plugin_pb.DetectionProposals{
JobType: "volume_balance",
Proposals: []*plugin_pb.JobProposal{},
HasMore: false,
}); err != nil {
return err
}
return sender.SendComplete(&plugin_pb.DetectionComplete{
JobType: "volume_balance",
Success: true,
TotalProposals: 0,
})
}
collectionFilter := strings.TrimSpace(readStringConfig(request.GetAdminConfigValues(), "collection_filter", ""))
masters := make([]string, 0)
if request.ClusterContext != nil {
masters = append(masters, request.ClusterContext.MasterGrpcAddresses...)
}
metrics, activeTopology, err := h.collectVolumeMetrics(ctx, masters, collectionFilter)
if err != nil {
return err
}
clusterInfo := &workertypes.ClusterInfo{ActiveTopology: activeTopology}
results, err := balancetask.Detection(metrics, clusterInfo, workerConfig.TaskConfig)
if err != nil {
return err
}
// Trace emission is diagnostic only; log and continue on failure.
if traceErr := emitVolumeBalanceDetectionDecisionTrace(sender, metrics, workerConfig.TaskConfig, results); traceErr != nil {
glog.Warningf("Plugin worker failed to emit volume_balance detection trace: %v", traceErr)
}
// Cap the result set at the admin-requested maximum and remember whether
// anything was cut off so the scheduler can re-run detection sooner.
maxResults := int(request.MaxResults)
hasMore := false
if maxResults > 0 && len(results) > maxResults {
hasMore = true
results = results[:maxResults]
}
proposals := make([]*plugin_pb.JobProposal, 0, len(results))
for _, result := range results {
proposal, proposalErr := buildVolumeBalanceProposal(result)
if proposalErr != nil {
// Skip unbuildable proposals instead of failing the whole detection run.
glog.Warningf("Plugin worker skip invalid volume_balance proposal: %v", proposalErr)
continue
}
proposals = append(proposals, proposal)
}
if err := sender.SendProposals(&plugin_pb.DetectionProposals{
JobType: "volume_balance",
Proposals: proposals,
HasMore: hasMore,
}); err != nil {
return err
}
return sender.SendComplete(&plugin_pb.DetectionComplete{
JobType: "volume_balance",
Success: true,
TotalProposals: int32(len(proposals)),
})
}
// emitVolumeBalanceDetectionDecisionTrace sends detector activity events
// explaining why the balance detection did or did not produce tasks.
//
// It always emits one global "decision_summary" event, then at most three
// per-disk-type detail events (capped to keep activity volume bounded). For
// each disk type the detail explains one of: cluster too small (<2 volumes),
// too few servers, well balanced (ratio under threshold), or a detected
// candidate. Returns an error only if a SendActivity call fails; a nil
// sender or nil taskConfig makes the trace a no-op.
func emitVolumeBalanceDetectionDecisionTrace(
sender DetectionSender,
metrics []*workertypes.VolumeHealthMetrics,
taskConfig *balancetask.Config,
results []*workertypes.TaskDetectionResult,
) error {
if sender == nil || taskConfig == nil {
return nil
}
totalVolumes := len(metrics)
summaryMessage := ""
if len(results) == 0 {
summaryMessage = fmt.Sprintf(
"BALANCE: No tasks created for %d volumes across %d disk type(s). Threshold=%.1f%%, MinServers=%d",
totalVolumes,
countBalanceDiskTypes(metrics),
taskConfig.ImbalanceThreshold*100,
taskConfig.MinServerCount,
)
} else {
summaryMessage = fmt.Sprintf(
"BALANCE: Created %d task(s) for %d volumes across %d disk type(s). Threshold=%.1f%%, MinServers=%d",
len(results),
totalVolumes,
countBalanceDiskTypes(metrics),
taskConfig.ImbalanceThreshold*100,
taskConfig.MinServerCount,
)
}
if err := sender.SendActivity(buildDetectorActivity("decision_summary", summaryMessage, map[string]*plugin_pb.ConfigValue{
"total_volumes": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(totalVolumes)},
},
"selected_tasks": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(results))},
},
"imbalance_threshold_percent": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: taskConfig.ImbalanceThreshold * 100},
},
"min_server_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(taskConfig.MinServerCount)},
},
})); err != nil {
return err
}
// Group metrics by disk type; blank disk types fall into "unknown",
// matching countBalanceDiskTypes.
volumesByDiskType := make(map[string][]*workertypes.VolumeHealthMetrics)
for _, metric := range metrics {
if metric == nil {
continue
}
diskType := strings.TrimSpace(metric.DiskType)
if diskType == "" {
diskType = "unknown"
}
volumesByDiskType[diskType] = append(volumesByDiskType[diskType], metric)
}
// Sort disk types so the emitted trace is deterministic.
diskTypes := make([]string, 0, len(volumesByDiskType))
for diskType := range volumesByDiskType {
diskTypes = append(diskTypes, diskType)
}
sort.Strings(diskTypes)
const minVolumeCount = 2
// detailCount caps per-disk-type events at three to bound activity volume.
detailCount := 0
for _, diskType := range diskTypes {
diskMetrics := volumesByDiskType[diskType]
volumeCount := len(diskMetrics)
if volumeCount < minVolumeCount {
message := fmt.Sprintf(
"BALANCE [%s]: No tasks created - cluster too small (%d volumes, need ≥%d)",
diskType,
volumeCount,
minVolumeCount,
)
if err := sender.SendActivity(buildDetectorActivity("decision_disk_type", message, map[string]*plugin_pb.ConfigValue{
"disk_type": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: diskType},
},
"volume_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(volumeCount)},
},
"required_min_volume_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: minVolumeCount},
},
})); err != nil {
return err
}
detailCount++
if detailCount >= 3 {
break
}
continue
}
serverVolumeCounts := make(map[string]int)
for _, metric := range diskMetrics {
serverVolumeCounts[metric.Server]++
}
if len(serverVolumeCounts) < taskConfig.MinServerCount {
message := fmt.Sprintf(
"BALANCE [%s]: No tasks created - too few servers (%d servers, need ≥%d)",
diskType,
len(serverVolumeCounts),
taskConfig.MinServerCount,
)
if err := sender.SendActivity(buildDetectorActivity("decision_disk_type", message, map[string]*plugin_pb.ConfigValue{
"disk_type": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: diskType},
},
"server_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(serverVolumeCounts))},
},
"required_min_server_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(taskConfig.MinServerCount)},
},
})); err != nil {
return err
}
detailCount++
if detailCount >= 3 {
break
}
continue
}
// Compute the spread of volume counts across servers for this disk type.
totalDiskTypeVolumes := len(diskMetrics)
avgVolumesPerServer := float64(totalDiskTypeVolumes) / float64(len(serverVolumeCounts))
maxVolumes := 0
minVolumes := totalDiskTypeVolumes
maxServer := ""
minServer := ""
for server, count := range serverVolumeCounts {
if count > maxVolumes {
maxVolumes = count
maxServer = server
}
if count < minVolumes {
minVolumes = count
minServer = server
}
}
// Imbalance ratio = (max - min) / average; compared against the configured
// threshold to decide whether this disk type is a balancing candidate.
imbalanceRatio := 0.0
if avgVolumesPerServer > 0 {
imbalanceRatio = float64(maxVolumes-minVolumes) / avgVolumesPerServer
}
stage := "decision_disk_type"
message := ""
if imbalanceRatio <= taskConfig.ImbalanceThreshold {
message = fmt.Sprintf(
"BALANCE [%s]: No tasks created - cluster well balanced. Imbalance=%.1f%% (threshold=%.1f%%). Max=%d volumes on %s, Min=%d on %s, Avg=%.1f",
diskType,
imbalanceRatio*100,
taskConfig.ImbalanceThreshold*100,
maxVolumes,
maxServer,
minVolumes,
minServer,
avgVolumesPerServer,
)
} else {
stage = "decision_candidate"
message = fmt.Sprintf(
"BALANCE [%s]: Candidate detected. Imbalance=%.1f%% (threshold=%.1f%%). Max=%d volumes on %s, Min=%d on %s, Avg=%.1f",
diskType,
imbalanceRatio*100,
taskConfig.ImbalanceThreshold*100,
maxVolumes,
maxServer,
minVolumes,
minServer,
avgVolumesPerServer,
)
}
if err := sender.SendActivity(buildDetectorActivity(stage, message, map[string]*plugin_pb.ConfigValue{
"disk_type": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: diskType},
},
"volume_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(totalDiskTypeVolumes)},
},
"server_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(len(serverVolumeCounts))},
},
"imbalance_percent": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: imbalanceRatio * 100},
},
"threshold_percent": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: taskConfig.ImbalanceThreshold * 100},
},
"max_volumes": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(maxVolumes)},
},
"min_volumes": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(minVolumes)},
},
"avg_volumes_per_server": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: avgVolumesPerServer},
},
})); err != nil {
return err
}
detailCount++
if detailCount >= 3 {
break
}
}
return nil
}
// countBalanceDiskTypes returns the number of distinct disk types present in
// metrics. Nil entries are ignored, and a blank or whitespace-only disk type
// is counted under the "unknown" bucket, matching the grouping used by the
// detection decision trace.
func countBalanceDiskTypes(metrics []*workertypes.VolumeHealthMetrics) int {
	seen := make(map[string]struct{}, len(metrics))
	for _, m := range metrics {
		if m == nil {
			continue
		}
		name := strings.TrimSpace(m.DiskType)
		if name == "" {
			name = "unknown"
		}
		seen[name] = struct{}{}
	}
	return len(seen)
}
// Execute runs one volume-balance job to completion, streaming state via
// sender.
//
// Flow:
//  1. Validate the request, job type, and decoded task parameters (a
//     non-empty source and target node are required).
//  2. Build the balance task, wiring its progress callback to
//     JOB_STATE_RUNNING progress updates.
//  3. Send an ASSIGNED progress update, execute the task, and finish with
//     either a FAILED progress update plus the error, or a JobCompleted
//     message summarizing the move.
//
// Progress-update send errors inside the callback and on the failure path
// are intentionally ignored; the task outcome itself determines the result.
func (h *VolumeBalanceHandler) Execute(
ctx context.Context,
request *plugin_pb.ExecuteJobRequest,
sender ExecutionSender,
) error {
if request == nil || request.Job == nil {
return fmt.Errorf("execute request/job is nil")
}
if sender == nil {
return fmt.Errorf("execution sender is nil")
}
if request.Job.JobType != "" && request.Job.JobType != "volume_balance" {
return fmt.Errorf("job type %q is not handled by volume_balance worker", request.Job.JobType)
}
params, err := decodeVolumeBalanceTaskParams(request.Job)
if err != nil {
return err
}
if len(params.Sources) == 0 || strings.TrimSpace(params.Sources[0].Node) == "" {
return fmt.Errorf("volume balance source node is required")
}
if len(params.Targets) == 0 || strings.TrimSpace(params.Targets[0].Node) == "" {
return fmt.Errorf("volume balance target node is required")
}
applyBalanceExecutionDefaults(params)
task := balancetask.NewBalanceTask(
request.Job.JobId,
params.Sources[0].Node,
params.VolumeId,
params.Collection,
)
// Relay task progress as RUNNING updates; the stage label doubles as the
// message when present.
task.SetProgressCallback(func(progress float64, stage string) {
message := fmt.Sprintf("balance progress %.0f%%", progress)
if strings.TrimSpace(stage) != "" {
message = stage
}
_ = sender.SendProgress(&plugin_pb.JobProgressUpdate{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
State: plugin_pb.JobState_JOB_STATE_RUNNING,
ProgressPercent: progress,
Stage: stage,
Message: message,
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity(stage, message),
},
})
})
// Acknowledge acceptance before starting work; a failed ack aborts the job.
if err := sender.SendProgress(&plugin_pb.JobProgressUpdate{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
State: plugin_pb.JobState_JOB_STATE_ASSIGNED,
ProgressPercent: 0,
Stage: "assigned",
Message: "volume balance job accepted",
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity("assigned", "volume balance job accepted"),
},
}); err != nil {
return err
}
if err := task.Execute(ctx, params); err != nil {
// Best-effort failure report; the original task error is what we return.
_ = sender.SendProgress(&plugin_pb.JobProgressUpdate{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
State: plugin_pb.JobState_JOB_STATE_FAILED,
ProgressPercent: 100,
Stage: "failed",
Message: err.Error(),
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity("failed", err.Error()),
},
})
return err
}
sourceNode := params.Sources[0].Node
targetNode := params.Targets[0].Node
resultSummary := fmt.Sprintf("volume %d moved from %s to %s", params.VolumeId, sourceNode, targetNode)
return sender.SendCompleted(&plugin_pb.JobCompleted{
JobId: request.Job.JobId,
JobType: request.Job.JobType,
Success: true,
Result: &plugin_pb.JobResult{
Summary: resultSummary,
OutputValues: map[string]*plugin_pb.ConfigValue{
"volume_id": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(params.VolumeId)},
},
"source_server": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: sourceNode},
},
"target_server": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: targetNode},
},
},
},
Activities: []*plugin_pb.ActivityEvent{
buildExecutorActivity("completed", resultSummary),
},
})
}
// collectVolumeMetrics gathers per-volume health metrics and the active
// topology from the given masters, optionally restricted to one collection.
// It delegates to the vacuum handler's master topology fetch/build flow so
// both handlers observe the cluster through identical state.
func (h *VolumeBalanceHandler) collectVolumeMetrics(
	ctx context.Context,
	masterAddresses []string,
	collectionFilter string,
) ([]*workertypes.VolumeHealthMetrics, *topology.ActiveTopology, error) {
	vacuum := &VacuumHandler{grpcDialOption: h.grpcDialOption}
	return vacuum.collectVolumeMetrics(ctx, masterAddresses, collectionFilter)
}
// deriveBalanceWorkerConfig builds the effective balance configuration from
// worker-supplied config values, starting from the task defaults and
// clamping every field to its valid range:
//   - imbalance_threshold is clamped into [0, 1]
//   - min_server_count is forced to at least 2
//   - min_interval_seconds is forced to at least 0 (default 0 = no throttle)
func deriveBalanceWorkerConfig(values map[string]*plugin_pb.ConfigValue) *volumeBalanceWorkerConfig {
	taskConfig := balancetask.NewDefaultConfig()

	threshold := readDoubleConfig(values, "imbalance_threshold", taskConfig.ImbalanceThreshold)
	switch {
	case threshold < 0:
		threshold = 0
	case threshold > 1:
		threshold = 1
	}
	taskConfig.ImbalanceThreshold = threshold

	servers := int(readInt64Config(values, "min_server_count", int64(taskConfig.MinServerCount)))
	if servers < 2 {
		servers = 2
	}
	taskConfig.MinServerCount = servers

	interval := int(readInt64Config(values, "min_interval_seconds", 0))
	if interval < 0 {
		interval = 0
	}

	return &volumeBalanceWorkerConfig{
		TaskConfig:         taskConfig,
		MinIntervalSeconds: interval,
	}
}
// buildVolumeBalanceProposal converts one detection result into a
// JobProposal for the admin scheduler.
//
// The result's typed task params are cloned (so the original is never
// mutated), completed with execution defaults, and serialized into the
// proposal's "task_params_pb" parameter; convenience parameters/labels
// (volume id, source/target server, collection) are attached alongside for
// display and filtering. The dedupe key is derived from volume id plus
// collection so repeated detections of the same move collapse to one job.
// Returns an error for a nil result or missing typed params.
func buildVolumeBalanceProposal(
result *workertypes.TaskDetectionResult,
) (*plugin_pb.JobProposal, error) {
if result == nil {
return nil, fmt.Errorf("task detection result is nil")
}
if result.TypedParams == nil {
return nil, fmt.Errorf("missing typed params for volume %d", result.VolumeID)
}
// Clone before mutating so defaults are applied to a private copy.
params := proto.Clone(result.TypedParams).(*worker_pb.TaskParams)
applyBalanceExecutionDefaults(params)
paramsPayload, err := proto.Marshal(params)
if err != nil {
return nil, fmt.Errorf("marshal task params: %w", err)
}
// Fall back to a generated id when detection did not assign a task id.
proposalID := strings.TrimSpace(result.TaskID)
if proposalID == "" {
proposalID = fmt.Sprintf("volume-balance-%d-%d", result.VolumeID, time.Now().UnixNano())
}
dedupeKey := fmt.Sprintf("volume_balance:%d", result.VolumeID)
if result.Collection != "" {
dedupeKey += ":" + result.Collection
}
sourceNode := ""
if len(params.Sources) > 0 {
sourceNode = strings.TrimSpace(params.Sources[0].Node)
}
targetNode := ""
if len(params.Targets) > 0 {
targetNode = strings.TrimSpace(params.Targets[0].Node)
}
summary := fmt.Sprintf("Balance volume %d", result.VolumeID)
if sourceNode != "" && targetNode != "" {
summary = fmt.Sprintf("Move volume %d from %s to %s", result.VolumeID, sourceNode, targetNode)
}
return &plugin_pb.JobProposal{
ProposalId: proposalID,
DedupeKey: dedupeKey,
JobType: "volume_balance",
Priority: mapTaskPriority(result.Priority),
Summary: summary,
Detail: strings.TrimSpace(result.Reason),
Parameters: map[string]*plugin_pb.ConfigValue{
"task_params_pb": {
Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: paramsPayload},
},
"volume_id": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: int64(result.VolumeID)},
},
"source_server": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: sourceNode},
},
"target_server": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: targetNode},
},
"collection": {
Kind: &plugin_pb.ConfigValue_StringValue{StringValue: result.Collection},
},
},
// Both node/server label spellings are emitted; presumably different UI
// surfaces read different keys — TODO confirm and consolidate.
Labels: map[string]string{
"task_type": "balance",
"volume_id": fmt.Sprintf("%d", result.VolumeID),
"collection": result.Collection,
"source_node": sourceNode,
"target_node": targetNode,
"source_server": sourceNode,
"target_server": targetNode,
},
}, nil
}
// decodeVolumeBalanceTaskParams extracts worker task parameters from a job
// spec via one of two paths:
//
//  1. Preferred: a serialized "task_params_pb" payload, unmarshaled directly
//     (with the job id filled in if the payload lacks a task id).
//  2. Fallback: individual parameters (volume_id, source_server/server,
//     target_server/target, collection, timeout_seconds, force_move) are
//     assembled into a fresh TaskParams with balance-specific settings.
//
// The fallback path requires a positive volume_id and non-empty
// source/target servers and returns an error otherwise.
func decodeVolumeBalanceTaskParams(job *plugin_pb.JobSpec) (*worker_pb.TaskParams, error) {
if job == nil {
return nil, fmt.Errorf("job spec is nil")
}
if payload := readBytesConfig(job.Parameters, "task_params_pb"); len(payload) > 0 {
params := &worker_pb.TaskParams{}
if err := proto.Unmarshal(payload, params); err != nil {
return nil, fmt.Errorf("unmarshal task_params_pb: %w", err)
}
// Older payloads may omit the task id; use the job id as the identity.
if params.TaskId == "" {
params.TaskId = job.JobId
}
return params, nil
}
volumeID := readInt64Config(job.Parameters, "volume_id", 0)
// Accept legacy parameter spellings ("server"/"target") as fallbacks.
sourceNode := strings.TrimSpace(readStringConfig(job.Parameters, "source_server", ""))
if sourceNode == "" {
sourceNode = strings.TrimSpace(readStringConfig(job.Parameters, "server", ""))
}
targetNode := strings.TrimSpace(readStringConfig(job.Parameters, "target_server", ""))
if targetNode == "" {
targetNode = strings.TrimSpace(readStringConfig(job.Parameters, "target", ""))
}
collection := readStringConfig(job.Parameters, "collection", "")
timeoutSeconds := int32(readInt64Config(job.Parameters, "timeout_seconds", int64(defaultBalanceTimeoutSeconds)))
if timeoutSeconds <= 0 {
timeoutSeconds = defaultBalanceTimeoutSeconds
}
forceMove := readBoolConfig(job.Parameters, "force_move", false)
if volumeID <= 0 {
return nil, fmt.Errorf("missing volume_id in job parameters")
}
if sourceNode == "" {
return nil, fmt.Errorf("missing source_server in job parameters")
}
if targetNode == "" {
return nil, fmt.Errorf("missing target_server in job parameters")
}
return &worker_pb.TaskParams{
TaskId: job.JobId,
VolumeId: uint32(volumeID),
Collection: collection,
Sources: []*worker_pb.TaskSource{
{
Node: sourceNode,
VolumeId: uint32(volumeID),
},
},
Targets: []*worker_pb.TaskTarget{
{
Node: targetNode,
VolumeId: uint32(volumeID),
},
},
TaskParams: &worker_pb.TaskParams_BalanceParams{
BalanceParams: &worker_pb.BalanceTaskParams{
ForceMove: forceMove,
TimeoutSeconds: timeoutSeconds,
},
},
}, nil
}
// applyBalanceExecutionDefaults guarantees params carries usable balance
// execution settings: a missing BalanceParams oneof is populated with
// ForceMove=false and the default timeout, while an existing one with a
// non-positive timeout gets defaultBalanceTimeoutSeconds. A nil params is
// left untouched.
func applyBalanceExecutionDefaults(params *worker_pb.TaskParams) {
	if params == nil {
		return
	}
	if bp := params.GetBalanceParams(); bp != nil {
		if bp.TimeoutSeconds <= 0 {
			bp.TimeoutSeconds = defaultBalanceTimeoutSeconds
		}
		return
	}
	params.TaskParams = &worker_pb.TaskParams_BalanceParams{
		BalanceParams: &worker_pb.BalanceTaskParams{
			ForceMove:      false,
			TimeoutSeconds: defaultBalanceTimeoutSeconds,
		},
	}
}
// readBoolConfig reads field from values as a boolean, coercing across
// kinds: bool is taken as-is, numeric values are true when non-zero, and
// strings accept the case-insensitive spellings "1"/"true"/"yes"/"on" and
// "0"/"false"/"no"/"off". A missing field, nil map/value, or unrecognized
// representation yields fallback.
func readBoolConfig(values map[string]*plugin_pb.ConfigValue, field string, fallback bool) bool {
	if values == nil {
		return fallback
	}
	raw, ok := values[field]
	if !ok || raw == nil {
		return fallback
	}
	switch kind := raw.Kind.(type) {
	case *plugin_pb.ConfigValue_BoolValue:
		return kind.BoolValue
	case *plugin_pb.ConfigValue_Int64Value:
		return kind.Int64Value != 0
	case *plugin_pb.ConfigValue_DoubleValue:
		return kind.DoubleValue != 0
	case *plugin_pb.ConfigValue_StringValue:
		switch strings.TrimSpace(strings.ToLower(kind.StringValue)) {
		case "1", "true", "yes", "on":
			return true
		case "0", "false", "no", "off":
			return false
		}
	}
	return fallback
}

View File

@@ -0,0 +1,283 @@
package pluginworker
import (
"context"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
balancetask "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
workertypes "github.com/seaweedfs/seaweedfs/weed/worker/types"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// TestDecodeVolumeBalanceTaskParamsFromPayload verifies the preferred decode
// path: a serialized TaskParams stored under "task_params_pb" is unmarshaled
// verbatim, including its own TaskId (the job id is not substituted when the
// payload already carries one).
func TestDecodeVolumeBalanceTaskParamsFromPayload(t *testing.T) {
expected := &worker_pb.TaskParams{
TaskId: "task-1",
VolumeId: 42,
Collection: "photos",
Sources: []*worker_pb.TaskSource{
{
Node: "10.0.0.1:8080",
VolumeId: 42,
},
},
Targets: []*worker_pb.TaskTarget{
{
Node: "10.0.0.2:8080",
VolumeId: 42,
},
},
TaskParams: &worker_pb.TaskParams_BalanceParams{
BalanceParams: &worker_pb.BalanceTaskParams{
ForceMove: true,
TimeoutSeconds: 1200,
},
},
}
payload, err := proto.Marshal(expected)
if err != nil {
t.Fatalf("marshal payload: %v", err)
}
job := &plugin_pb.JobSpec{
JobId: "job-from-admin",
Parameters: map[string]*plugin_pb.ConfigValue{
"task_params_pb": {Kind: &plugin_pb.ConfigValue_BytesValue{BytesValue: payload}},
},
}
actual, err := decodeVolumeBalanceTaskParams(job)
if err != nil {
t.Fatalf("decodeVolumeBalanceTaskParams() err = %v", err)
}
// proto.Equal compares message semantics, not pointer identity.
if !proto.Equal(expected, actual) {
t.Fatalf("decoded params mismatch\nexpected: %+v\nactual: %+v", expected, actual)
}
}
// TestDecodeVolumeBalanceTaskParamsFallback exercises the fallback decode
// path (no "task_params_pb" payload): individual parameters are assembled
// into TaskParams, the job id becomes the task id, and default balance
// params are attached.
func TestDecodeVolumeBalanceTaskParamsFallback(t *testing.T) {
job := &plugin_pb.JobSpec{
JobId: "job-2",
Parameters: map[string]*plugin_pb.ConfigValue{
"volume_id": {Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 7}},
"source_server": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "127.0.0.1:8080"}},
"target_server": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "127.0.0.2:8080"}},
"collection": {Kind: &plugin_pb.ConfigValue_StringValue{StringValue: "videos"}},
},
}
params, err := decodeVolumeBalanceTaskParams(job)
if err != nil {
t.Fatalf("decodeVolumeBalanceTaskParams() err = %v", err)
}
if params.TaskId != "job-2" || params.VolumeId != 7 || params.Collection != "videos" {
t.Fatalf("unexpected basic params: %+v", params)
}
if len(params.Sources) != 1 || params.Sources[0].Node != "127.0.0.1:8080" {
t.Fatalf("unexpected sources: %+v", params.Sources)
}
if len(params.Targets) != 1 || params.Targets[0].Node != "127.0.0.2:8080" {
t.Fatalf("unexpected targets: %+v", params.Targets)
}
if params.GetBalanceParams() == nil {
t.Fatalf("expected fallback balance params")
}
}
func TestDeriveBalanceWorkerConfig(t *testing.T) {
values := map[string]*plugin_pb.ConfigValue{
"imbalance_threshold": {
Kind: &plugin_pb.ConfigValue_DoubleValue{DoubleValue: 0.45},
},
"min_server_count": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 5},
},
"min_interval_seconds": {
Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 33},
},
}
cfg := deriveBalanceWorkerConfig(values)
if cfg.TaskConfig.ImbalanceThreshold != 0.45 {
t.Fatalf("expected imbalance_threshold 0.45, got %v", cfg.TaskConfig.ImbalanceThreshold)
}
if cfg.TaskConfig.MinServerCount != 5 {
t.Fatalf("expected min_server_count 5, got %d", cfg.TaskConfig.MinServerCount)
}
if cfg.MinIntervalSeconds != 33 {
t.Fatalf("expected min_interval_seconds 33, got %d", cfg.MinIntervalSeconds)
}
}
// TestBuildVolumeBalanceProposal checks that a detection result with typed
// params produces a proposal carrying the expected job type, a non-empty
// dedupe key, the serialized task params payload, and source/target labels
// copied from the params' nodes.
func TestBuildVolumeBalanceProposal(t *testing.T) {
params := &worker_pb.TaskParams{
TaskId: "balance-task-1",
VolumeId: 55,
Collection: "images",
Sources: []*worker_pb.TaskSource{
{
Node: "source-a:8080",
VolumeId: 55,
},
},
Targets: []*worker_pb.TaskTarget{
{
Node: "target-b:8080",
VolumeId: 55,
},
},
TaskParams: &worker_pb.TaskParams_BalanceParams{
BalanceParams: &worker_pb.BalanceTaskParams{
TimeoutSeconds: 600,
},
},
}
result := &workertypes.TaskDetectionResult{
TaskID: "balance-task-1",
TaskType: workertypes.TaskTypeBalance,
VolumeID: 55,
Server: "source-a",
Collection: "images",
Priority: workertypes.TaskPriorityHigh,
Reason: "imbalanced load",
TypedParams: params,
}
proposal, err := buildVolumeBalanceProposal(result)
if err != nil {
t.Fatalf("buildVolumeBalanceProposal() err = %v", err)
}
if proposal.JobType != "volume_balance" {
t.Fatalf("unexpected job type %q", proposal.JobType)
}
if proposal.DedupeKey == "" {
t.Fatalf("expected dedupe key")
}
if proposal.Parameters["task_params_pb"] == nil {
t.Fatalf("expected serialized task params")
}
// Labels come from the typed params' nodes, not result.Server.
if proposal.Labels["source_node"] != "source-a:8080" {
t.Fatalf("unexpected source label %q", proposal.Labels["source_node"])
}
if proposal.Labels["target_node"] != "target-b:8080" {
t.Fatalf("unexpected target label %q", proposal.Labels["target_node"])
}
}
// TestVolumeBalanceHandlerRejectsUnsupportedJobType checks that both the
// detection and execution entry points refuse jobs typed for other handlers.
func TestVolumeBalanceHandlerRejectsUnsupportedJobType(t *testing.T) {
	handler := NewVolumeBalanceHandler(nil)
	ctx := context.Background()
	detectReq := &plugin_pb.RunDetectionRequest{
		JobType: "vacuum",
	}
	if err := handler.Detect(ctx, detectReq, noopDetectionSender{}); err == nil {
		t.Fatalf("expected detect job type mismatch error")
	}
	execReq := &plugin_pb.ExecuteJobRequest{
		Job: &plugin_pb.JobSpec{JobId: "job-1", JobType: "vacuum"},
	}
	if err := handler.Execute(ctx, execReq, noopExecutionSender{}); err == nil {
		t.Fatalf("expected execute job type mismatch error")
	}
}
// TestVolumeBalanceHandlerDetectSkipsByMinInterval verifies the throttled
// detection path: a last successful run more recent than min_interval_seconds
// yields an empty proposal batch, a successful completion, and a
// skip-by-interval activity event.
func TestVolumeBalanceHandlerDetectSkipsByMinInterval(t *testing.T) {
handler := NewVolumeBalanceHandler(nil)
sender := &recordingDetectionSender{}
// Last run 3s ago with a 10s minimum interval forces the skip path.
err := handler.Detect(context.Background(), &plugin_pb.RunDetectionRequest{
JobType: "volume_balance",
LastSuccessfulRun: timestamppb.New(time.Now().Add(-3 * time.Second)),
WorkerConfigValues: map[string]*plugin_pb.ConfigValue{
"min_interval_seconds": {Kind: &plugin_pb.ConfigValue_Int64Value{Int64Value: 10}},
},
}, sender)
if err != nil {
t.Fatalf("detect returned err = %v", err)
}
if sender.proposals == nil {
t.Fatalf("expected proposals message")
}
if len(sender.proposals.Proposals) != 0 {
t.Fatalf("expected zero proposals, got %d", len(sender.proposals.Proposals))
}
if sender.complete == nil || !sender.complete.Success {
t.Fatalf("expected successful completion message")
}
if len(sender.events) == 0 {
t.Fatalf("expected detector activity events")
}
if !strings.Contains(sender.events[0].Message, "min interval") {
t.Fatalf("unexpected skip-by-interval message: %q", sender.events[0].Message)
}
}
// TestEmitVolumeBalanceDetectionDecisionTraceNoTasks feeds a perfectly
// balanced two-server cluster (no detection results) through the decision
// trace and expects a global no-tasks summary plus a per-disk-type
// "well balanced" detail, all tagged with the detector activity source.
func TestEmitVolumeBalanceDetectionDecisionTraceNoTasks(t *testing.T) {
sender := &recordingDetectionSender{}
config := balancetask.NewDefaultConfig()
config.ImbalanceThreshold = 0.2
config.MinServerCount = 2
// Two volumes on each of two servers: zero imbalance by construction.
metrics := []*workertypes.VolumeHealthMetrics{
{VolumeID: 1, Server: "server-a", DiskType: "hdd"},
{VolumeID: 2, Server: "server-a", DiskType: "hdd"},
{VolumeID: 3, Server: "server-b", DiskType: "hdd"},
{VolumeID: 4, Server: "server-b", DiskType: "hdd"},
}
if err := emitVolumeBalanceDetectionDecisionTrace(sender, metrics, config, nil); err != nil {
t.Fatalf("emitVolumeBalanceDetectionDecisionTrace error: %v", err)
}
if len(sender.events) < 2 {
t.Fatalf("expected at least 2 detection events, got %d", len(sender.events))
}
if sender.events[0].Source != plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR {
t.Fatalf("expected detector source, got %v", sender.events[0].Source)
}
if !strings.Contains(sender.events[0].Message, "BALANCE: No tasks created for 4 volumes") {
t.Fatalf("unexpected summary message: %q", sender.events[0].Message)
}
foundDiskTypeDecision := false
for _, event := range sender.events {
if strings.Contains(event.Message, "BALANCE [hdd]: No tasks created - cluster well balanced") {
foundDiskTypeDecision = true
break
}
}
if !foundDiskTypeDecision {
t.Fatalf("expected per-disk-type decision message")
}
}
// TestVolumeBalanceDescriptorOmitsExecutionTuningFields ensures the worker
// config form does not surface per-job execution tuning knobs; those travel
// inside the job parameters instead.
func TestVolumeBalanceDescriptorOmitsExecutionTuningFields(t *testing.T) {
	desc := NewVolumeBalanceHandler(nil).Descriptor()
	if desc == nil || desc.WorkerConfigForm == nil {
		t.Fatalf("expected worker config form in descriptor")
	}
	form := desc.WorkerConfigForm
	if workerConfigFormHasField(form, "timeout_seconds") {
		t.Fatalf("unexpected timeout_seconds in volume balance worker config form")
	}
	if workerConfigFormHasField(form, "force_move") {
		t.Fatalf("unexpected force_move in volume balance worker config form")
	}
}
// workerConfigFormHasField reports whether any section of form declares a
// field named fieldName. Nil forms, sections, and fields are skipped.
func workerConfigFormHasField(form *plugin_pb.ConfigForm, fieldName string) bool {
	if form == nil {
		return false
	}
	for _, section := range form.Sections {
		if section == nil {
			continue
		}
		for _, candidate := range section.Fields {
			if candidate == nil {
				continue
			}
			if candidate.Name == fieldName {
				return true
			}
		}
	}
	return false
}

View File

@@ -0,0 +1,939 @@
package pluginworker
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
// defaultHeartbeatInterval is how often a connected worker reports liveness.
defaultHeartbeatInterval = 15 * time.Second
// defaultReconnectDelay is the pause between stream reconnect attempts.
defaultReconnectDelay = 5 * time.Second
// defaultSendBufferSize bounds the per-connection outbound message queue.
defaultSendBufferSize = 256
)
// DetectionSender sends detection responses for one request.
// Implementations forward messages back to the admin over the worker stream.
type DetectionSender interface {
// SendProposals delivers one (possibly partial) batch of job proposals.
SendProposals(*plugin_pb.DetectionProposals) error
// SendComplete marks the detection run as finished.
SendComplete(*plugin_pb.DetectionComplete) error
// SendActivity emits a diagnostic activity event for the run.
SendActivity(*plugin_pb.ActivityEvent) error
}
// ExecutionSender sends execution progress/completion responses for one request.
type ExecutionSender interface {
// SendProgress reports an intermediate job state/progress update.
SendProgress(*plugin_pb.JobProgressUpdate) error
// SendCompleted reports the job's terminal outcome.
SendCompleted(*plugin_pb.JobCompleted) error
}
// JobHandler implements one plugin job type on the worker side.
type JobHandler interface {
// Capability describes what this handler can do, advertised at connect time.
Capability() *plugin_pb.JobTypeCapability
// Descriptor returns the job type's config schema and runtime defaults.
Descriptor() *plugin_pb.JobTypeDescriptor
// Detect scans the cluster and streams job proposals via DetectionSender.
Detect(context.Context, *plugin_pb.RunDetectionRequest, DetectionSender) error
// Execute runs one job and streams progress via ExecutionSender.
Execute(context.Context, *plugin_pb.ExecuteJobRequest, ExecutionSender) error
}
// WorkerOptions configures one plugin worker process.
// Zero-valued fields are replaced with defaults by NewWorker; AdminServer
// and GrpcDialOption are mandatory, and at least one of Handler/Handlers
// must be supplied.
type WorkerOptions struct {
AdminServer string
// WorkerID identifies this worker to admin; auto-generated when empty.
WorkerID string
// WorkerVersion defaults to "dev" when empty.
WorkerVersion string
// WorkerAddress defaults to the local hostname when empty.
WorkerAddress string
HeartbeatInterval time.Duration
ReconnectDelay time.Duration
MaxDetectionConcurrency int
MaxExecutionConcurrency int
GrpcDialOption grpc.DialOption
// Handlers lists job handlers; Handler is a single-handler convenience
// that is merged in front of Handlers.
Handlers []JobHandler
Handler JobHandler
}
// Worker runs one plugin job handler over plugin.proto stream.
type Worker struct {
opts WorkerOptions
// detectSlots/execSlots are semaphores bounding concurrent detections and
// executions (capacity = the configured max concurrency).
detectSlots chan struct{}
execSlots chan struct{}
// handlers maps normalized job type keys to their handler.
handlers map[string]JobHandler
// runningMu guards runningWork (currently tracked in-flight work).
runningMu sync.RWMutex
runningWork map[string]*plugin_pb.RunningWork
// workCancelMu guards workCancel (per-work-item cancel functions).
workCancelMu sync.Mutex
workCancel map[string]context.CancelFunc
workerID string
// connectionMu guards connected (whether an admin stream is active).
connectionMu sync.RWMutex
connected bool
}
// NewWorker creates a plugin worker instance.
//
// It validates the mandatory options (AdminServer, GrpcDialOption), fills in
// defaults for intervals, concurrency limits, version, worker id (random),
// and worker address (hostname), then merges the single Handler with the
// Handlers slice and indexes them by normalized job type, rejecting nil
// handlers, empty job types, and duplicates. Returns an error rather than
// panicking on invalid configuration.
func NewWorker(options WorkerOptions) (*Worker, error) {
if strings.TrimSpace(options.AdminServer) == "" {
return nil, fmt.Errorf("admin server is required")
}
if options.GrpcDialOption == nil {
return nil, fmt.Errorf("grpc dial option is required")
}
if options.HeartbeatInterval <= 0 {
options.HeartbeatInterval = defaultHeartbeatInterval
}
if options.ReconnectDelay <= 0 {
options.ReconnectDelay = defaultReconnectDelay
}
if options.MaxDetectionConcurrency <= 0 {
options.MaxDetectionConcurrency = 1
}
if options.MaxExecutionConcurrency <= 0 {
options.MaxExecutionConcurrency = 1
}
if strings.TrimSpace(options.WorkerVersion) == "" {
options.WorkerVersion = "dev"
}
workerID := strings.TrimSpace(options.WorkerID)
if workerID == "" {
workerID = generateWorkerID()
}
workerAddress := strings.TrimSpace(options.WorkerAddress)
if workerAddress == "" {
// Best-effort hostname; an empty address is tolerated if lookup fails.
hostname, _ := os.Hostname()
workerAddress = hostname
}
opts := options
opts.WorkerAddress = workerAddress
// Merge the single-handler convenience field in front of the slice.
allHandlers := make([]JobHandler, 0, len(opts.Handlers)+1)
if opts.Handler != nil {
allHandlers = append(allHandlers, opts.Handler)
}
allHandlers = append(allHandlers, opts.Handlers...)
if len(allHandlers) == 0 {
return nil, fmt.Errorf("at least one job handler is required")
}
// Index handlers by normalized job type, rejecting duplicates.
handlers := make(map[string]JobHandler, len(allHandlers))
for i, handler := range allHandlers {
if handler == nil {
return nil, fmt.Errorf("job handler at index %d is nil", i)
}
handlerJobType, err := resolveHandlerJobType(handler)
if err != nil {
return nil, fmt.Errorf("resolve job handler at index %d: %w", i, err)
}
key := normalizeJobTypeKey(handlerJobType)
if key == "" {
return nil, fmt.Errorf("job handler at index %d has empty job type", i)
}
if _, found := handlers[key]; found {
return nil, fmt.Errorf("duplicate job handler for job type %q", handlerJobType)
}
handlers[key] = handler
}
// Keep opts.Handler populated for callers that read it back.
if opts.Handler == nil {
opts.Handler = allHandlers[0]
}
w := &Worker{
opts: opts,
detectSlots: make(chan struct{}, opts.MaxDetectionConcurrency),
execSlots: make(chan struct{}, opts.MaxExecutionConcurrency),
handlers: handlers,
runningWork: make(map[string]*plugin_pb.RunningWork),
workCancel: make(map[string]context.CancelFunc),
workerID: workerID,
}
return w, nil
}
// Run keeps the plugin worker connected to the admin server, re-dialing the
// control stream whenever it drops. It returns nil once ctx is canceled;
// individual stream failures are logged and retried after ReconnectDelay.
func (w *Worker) Run(ctx context.Context) error {
	target := pb.ServerToGrpcAddress(w.opts.AdminServer)
	for ctx.Err() == nil {
		if err := w.runOnce(ctx, target); err != nil {
			if ctx.Err() != nil {
				return nil
			}
			glog.Warningf("Plugin worker %s stream ended: %v", w.workerID, err)
		}
		select {
		case <-ctx.Done():
			return nil
		case <-time.After(w.opts.ReconnectDelay):
		}
	}
	return nil
}
// runOnce performs a single connect-and-serve cycle against the admin
// server: dial, open the worker stream, announce via hello, then pump
// heartbeats and inbound requests until the stream or context fails.
// The returned error explains why the cycle ended; Run decides on retry.
func (w *Worker) runOnce(ctx context.Context, adminAddress string) error {
	// Whatever happens below, this attempt ends with the worker marked
	// disconnected.
	defer w.setConnected(false)
	dialCtx, cancelDial := context.WithTimeout(ctx, 5*time.Second)
	defer cancelDial()
	conn, err := pb.GrpcDial(dialCtx, adminAddress, false, w.opts.GrpcDialOption)
	if err != nil {
		return fmt.Errorf("dial admin %s: %w", adminAddress, err)
	}
	defer conn.Close()
	client := plugin_pb.NewPluginControlServiceClient(conn)
	// connCtx bounds the lifetime of every goroutine tied to this stream.
	connCtx, cancelConn := context.WithCancel(ctx)
	defer cancelConn()
	stream, err := client.WorkerStream(connCtx)
	if err != nil {
		return fmt.Errorf("open worker stream: %w", err)
	}
	w.setConnected(true)
	// Outbound messages are funneled through sendCh so that only the
	// single writer goroutine below ever calls stream.Send.
	sendCh := make(chan *plugin_pb.WorkerToAdminMessage, defaultSendBufferSize)
	sendErrCh := make(chan error, 1)
	// send enqueues one message after stamping the worker id and a sent
	// timestamp; it returns false once the stream context is done.
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		if msg == nil {
			return false
		}
		msg.WorkerId = w.workerID
		if msg.SentAt == nil {
			msg.SentAt = timestamppb.Now()
		}
		select {
		case <-connCtx.Done():
			return false
		case sendCh <- msg:
			return true
		}
	}
	// Writer goroutine: drains sendCh onto the stream. On a send failure
	// it records the error (buffer of one, non-blocking) and cancels the
	// stream context so the Recv loop below unblocks promptly.
	go func() {
		for {
			select {
			case <-connCtx.Done():
				return
			case msg := <-sendCh:
				if msg == nil {
					continue
				}
				if err := stream.Send(msg); err != nil {
					select {
					case sendErrCh <- err:
					default:
					}
					cancelConn()
					return
				}
			}
		}
	}()
	// The first message on the stream must be the hello announcement.
	if !send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Hello{Hello: w.buildHello()},
	}) {
		return fmt.Errorf("send worker hello: stream closed")
	}
	heartbeatTicker := time.NewTicker(w.opts.HeartbeatInterval)
	defer heartbeatTicker.Stop()
	// Heartbeat goroutine: periodic load reports for the stream lifetime.
	go func() {
		for {
			select {
			case <-connCtx.Done():
				return
			case <-heartbeatTicker.C:
				w.sendHeartbeat(send)
			}
		}
	}()
	// Receive loop. The send-error check is a non-blocking poll: a send
	// failure also cancels connCtx, which makes the blocking Recv below
	// return, so send errors still surface promptly.
	for {
		select {
		case <-connCtx.Done():
			return connCtx.Err()
		case err := <-sendErrCh:
			return fmt.Errorf("send to admin stream: %w", err)
		default:
		}
		message, err := stream.Recv()
		if err != nil {
			return fmt.Errorf("recv admin message: %w", err)
		}
		w.handleAdminMessage(connCtx, message, send)
	}
}
// IsConnected reports whether the worker currently has an active stream to admin.
func (w *Worker) IsConnected() bool {
	w.connectionMu.RLock()
	connected := w.connected
	w.connectionMu.RUnlock()
	return connected
}
// setConnected records the current stream state under the connection lock.
func (w *Worker) setConnected(connected bool) {
	w.connectionMu.Lock()
	defer w.connectionMu.Unlock()
	w.connected = connected
}
// handleAdminMessage dispatches one admin-to-worker message to the
// matching handler. Requests without a dedicated response type are
// answered with a WorkerAcknowledge.
func (w *Worker) handleAdminMessage(
	ctx context.Context,
	message *plugin_pb.AdminToWorkerMessage,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) {
	if message == nil {
		return
	}
	// ack replies to the current request with an acknowledge message.
	ack := func(accepted bool, text string) {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
				RequestId: message.GetRequestId(),
				Accepted:  accepted,
				Message:   text,
			}},
		})
	}
	switch body := message.Body.(type) {
	case *plugin_pb.AdminToWorkerMessage_Hello:
		// Admin hello requires no response.
		_ = body
	case *plugin_pb.AdminToWorkerMessage_RequestConfigSchema:
		w.handleSchemaRequest(message.GetRequestId(), body.RequestConfigSchema, send)
	case *plugin_pb.AdminToWorkerMessage_RunDetectionRequest:
		w.handleDetectionRequest(ctx, message.GetRequestId(), body.RunDetectionRequest, send)
	case *plugin_pb.AdminToWorkerMessage_ExecuteJobRequest:
		w.handleExecuteRequest(ctx, message.GetRequestId(), body.ExecuteJobRequest, send)
	case *plugin_pb.AdminToWorkerMessage_CancelRequest:
		targetID := ""
		if request := body.CancelRequest; request != nil {
			targetID = strings.TrimSpace(request.TargetId)
		}
		switch {
		case targetID == "":
			ack(false, "cancel target is required")
		case w.cancelWork(targetID):
			ack(true, "cancel request accepted")
		default:
			ack(false, "cancel target not found")
		}
	case *plugin_pb.AdminToWorkerMessage_Shutdown:
		ack(true, "shutdown acknowledged")
	default:
		ack(false, "unsupported request body")
	}
}
// handleSchemaRequest answers a config-schema request with the job type
// descriptor of the matching handler, or an error response when the job
// type cannot be resolved or has no descriptor.
func (w *Worker) handleSchemaRequest(requestID string, request *plugin_pb.RequestConfigSchema, send func(*plugin_pb.WorkerToAdminMessage) bool) {
	jobType := ""
	if request != nil {
		jobType = strings.TrimSpace(request.JobType)
	}
	// fail replies with an unsuccessful schema response.
	fail := func(respondedJobType, reason string) {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_ConfigSchemaResponse{ConfigSchemaResponse: &plugin_pb.ConfigSchemaResponse{
				RequestId:    requestID,
				JobType:      respondedJobType,
				Success:      false,
				ErrorMessage: reason,
			}},
		})
	}
	handler, resolvedJobType, err := w.findHandler(jobType)
	if err != nil {
		fail(jobType, err.Error())
		return
	}
	descriptor := handler.Descriptor()
	if descriptor == nil || descriptor.JobType == "" {
		fail(resolvedJobType, "handler descriptor is not configured")
		return
	}
	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_ConfigSchemaResponse{ConfigSchemaResponse: &plugin_pb.ConfigSchemaResponse{
			RequestId:         requestID,
			JobType:           descriptor.JobType,
			Success:           true,
			JobTypeDescriptor: descriptor,
		}},
	})
}
// handleDetectionRequest acknowledges a detection request, then runs the
// handler's Detect in a goroutine once a detection slot frees up. The
// request is tracked as running work (stage "queued" while waiting for a
// slot) so heartbeats reflect the pending detection immediately.
func (w *Worker) handleDetectionRequest(
	ctx context.Context,
	requestID string,
	request *plugin_pb.RunDetectionRequest,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) {
	if request == nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_DetectionComplete{DetectionComplete: &plugin_pb.DetectionComplete{
				RequestId:    requestID,
				Success:      false,
				ErrorMessage: "run detection request is nil",
			}},
		})
		return
	}
	handler, resolvedJobType, err := w.findHandler(request.JobType)
	if err != nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_DetectionComplete{DetectionComplete: &plugin_pb.DetectionComplete{
				RequestId:    requestID,
				JobType:      request.JobType,
				Success:      false,
				ErrorMessage: err.Error(),
			}},
		})
		return
	}
	// Track the request as queued before acknowledging so the next
	// heartbeat already reports it.
	workKey := "detect:" + requestID
	w.setRunningWork(workKey, &plugin_pb.RunningWork{
		WorkId:          requestID,
		Kind:            plugin_pb.WorkKind_WORK_KIND_DETECTION,
		JobType:         resolvedJobType,
		State:           plugin_pb.JobState_JOB_STATE_ASSIGNED,
		ProgressPercent: 0,
		Stage:           "queued",
	})
	w.sendHeartbeat(send)
	// requestCtx lets a cancel request (keyed by requestID) abort the
	// detection even while it is still waiting for a slot.
	requestCtx, cancelRequest := context.WithCancel(ctx)
	w.setWorkCancel(cancelRequest, requestID)
	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
			RequestId: requestID,
			Accepted:  true,
			Message:   "detection request accepted",
		}},
	})
	go func() {
		detectionSender := &detectionSender{
			requestID: requestID,
			jobType:   resolvedJobType,
			send:      send,
		}
		// Outer cleanup; defers run LIFO, so this fires after the slot
		// release defer below (when that one was registered).
		defer func() {
			w.clearWorkCancel(requestID)
			cancelRequest()
			w.clearRunningWork(workKey)
			w.sendHeartbeat(send)
		}()
		// Block until a detection slot frees up or the request is canceled.
		select {
		case <-requestCtx.Done():
			detectionSender.SendComplete(&plugin_pb.DetectionComplete{
				Success:      false,
				ErrorMessage: requestCtx.Err().Error(),
			})
			return
		case w.detectSlots <- struct{}{}:
		}
		defer func() {
			<-w.detectSlots
			w.sendHeartbeat(send)
		}()
		// Promote the tracked work from queued to running.
		w.setRunningWork(workKey, &plugin_pb.RunningWork{
			WorkId:          requestID,
			Kind:            plugin_pb.WorkKind_WORK_KIND_DETECTION,
			JobType:         resolvedJobType,
			State:           plugin_pb.JobState_JOB_STATE_RUNNING,
			ProgressPercent: 0,
			Stage:           "detecting",
		})
		w.sendHeartbeat(send)
		// Handlers report results through detectionSender; a returned
		// error is converted into a failed DetectionComplete.
		if err := handler.Detect(requestCtx, request, detectionSender); err != nil {
			detectionSender.SendComplete(&plugin_pb.DetectionComplete{
				Success:      false,
				ErrorMessage: err.Error(),
			})
		}
	}()
}
// handleExecuteRequest acknowledges and runs a job execution. Unlike
// detection (which queues for a slot), an execution is rejected
// immediately with a failed JobCompleted when all execution slots are
// busy, so admin can reschedule it elsewhere.
func (w *Worker) handleExecuteRequest(
	ctx context.Context,
	requestID string,
	request *plugin_pb.ExecuteJobRequest,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) {
	if request == nil || request.Job == nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: &plugin_pb.JobCompleted{
				RequestId:    requestID,
				Success:      false,
				ErrorMessage: "execute request/job is nil",
			}},
		})
		return
	}
	handler, resolvedJobType, err := w.findHandler(request.Job.JobType)
	if err != nil {
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: &plugin_pb.JobCompleted{
				RequestId:    requestID,
				JobId:        request.Job.JobId,
				JobType:      request.Job.JobType,
				Success:      false,
				ErrorMessage: err.Error(),
			}},
		})
		return
	}
	// Non-blocking slot acquisition: reject instead of queueing when the
	// executor is saturated.
	select {
	case w.execSlots <- struct{}{}:
	default:
		send(&plugin_pb.WorkerToAdminMessage{
			Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: &plugin_pb.JobCompleted{
				RequestId:    requestID,
				JobId:        request.Job.JobId,
				JobType:      resolvedJobType,
				Success:      false,
				ErrorMessage: "executor is at capacity",
			}},
		})
		return
	}
	w.sendHeartbeat(send)
	// Track the execution before acknowledging so heartbeats already
	// report the running job.
	workKey := "exec:" + requestID
	w.setRunningWork(workKey, &plugin_pb.RunningWork{
		WorkId:          request.Job.JobId,
		Kind:            plugin_pb.WorkKind_WORK_KIND_EXECUTION,
		JobType:         resolvedJobType,
		State:           plugin_pb.JobState_JOB_STATE_RUNNING,
		ProgressPercent: 0,
		Stage:           "starting",
	})
	w.sendHeartbeat(send)
	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Acknowledge{Acknowledge: &plugin_pb.WorkerAcknowledge{
			RequestId: requestID,
			Accepted:  true,
			Message:   "execute request accepted",
		}},
	})
	go func() {
		// Cancel requests may target either the request id or the job id.
		requestCtx, cancelRequest := context.WithCancel(ctx)
		w.setWorkCancel(cancelRequest, requestID, request.Job.JobId)
		defer func() {
			w.clearWorkCancel(requestID, request.Job.JobId)
			cancelRequest()
			<-w.execSlots
			w.clearRunningWork(workKey)
			w.sendHeartbeat(send)
		}()
		executionSender := &executionSender{
			requestID: requestID,
			jobID:     request.Job.JobId,
			jobType:   resolvedJobType,
			send:      send,
			// Mirror handler-reported progress into the running-work
			// entry so heartbeats stay current.
			onProgress: func(progress float64, stage string) {
				w.updateRunningExecution(workKey, progress, stage)
			},
		}
		// A returned handler error is converted into a failed JobCompleted.
		if err := handler.Execute(requestCtx, request, executionSender); err != nil {
			executionSender.SendCompleted(&plugin_pb.JobCompleted{
				Success:      false,
				ErrorMessage: err.Error(),
			})
		}
	}()
}
// buildHello assembles the WorkerHello announcement sent as the first
// message on a new stream, advertising every job type this worker serves
// together with the worker-wide concurrency limits.
func (w *Worker) buildHello() *plugin_pb.WorkerHello {
	sortedKeys := make([]string, 0, len(w.handlers))
	for key := range w.handlers {
		sortedKeys = append(sortedKeys, key)
	}
	sort.Strings(sortedKeys)
	capabilities := make([]*plugin_pb.JobTypeCapability, 0, len(sortedKeys))
	jobTypes := make([]string, 0, len(sortedKeys))
	for _, key := range sortedKeys {
		handler := w.handlers[key]
		if handler == nil {
			continue
		}
		// Handlers were validated in NewWorker, so the resolve error can
		// be ignored here.
		jobType, _ := resolveHandlerJobType(handler)
		var capability *plugin_pb.JobTypeCapability
		if declared := handler.Capability(); declared == nil {
			capability = &plugin_pb.JobTypeCapability{}
		} else {
			// Clone before mutating so the handler's own capability stays
			// untouched.
			capability = proto.Clone(declared).(*plugin_pb.JobTypeCapability)
		}
		if strings.TrimSpace(capability.JobType) == "" {
			capability.JobType = jobType
		}
		// The worker-level limits always win over handler-declared ones.
		capability.MaxDetectionConcurrency = int32(cap(w.detectSlots))
		capability.MaxExecutionConcurrency = int32(cap(w.execSlots))
		capabilities = append(capabilities, capability)
		if capability.JobType != "" {
			jobTypes = append(jobTypes, capability.JobType)
		}
	}
	instanceID := generateWorkerID()
	return &plugin_pb.WorkerHello{
		WorkerId:         w.workerID,
		WorkerInstanceId: "inst-" + instanceID,
		Address:          w.opts.WorkerAddress,
		WorkerVersion:    w.opts.WorkerVersion,
		ProtocolVersion:  "plugin.v1",
		Capabilities:     capabilities,
		Metadata: map[string]string{
			"runtime":   "plugin",
			"job_types": strings.Join(jobTypes, ","),
		},
	}
}
// buildHeartbeat snapshots the worker's current load for the admin server:
// running work entries plus detection/execution slot usage.
//
// Each RunningWork entry is deep-copied with proto.Clone before leaving
// the lock. The previous shallow struct copy (cloned := *work) copied the
// protobuf message's internal state by value (a `go vet` copylocks
// violation for generated messages) and still shared nested pointer
// fields with the live entry that updateRunningExecution mutates
// concurrently; cloning removes both problems.
func (w *Worker) buildHeartbeat() *plugin_pb.WorkerHeartbeat {
	w.runningMu.RLock()
	running := make([]*plugin_pb.RunningWork, 0, len(w.runningWork))
	for _, work := range w.runningWork {
		if work == nil {
			continue
		}
		running = append(running, proto.Clone(work).(*plugin_pb.RunningWork))
	}
	w.runningMu.RUnlock()
	// Channel len/cap reads are safe without holding runningMu.
	detectUsed := len(w.detectSlots)
	execUsed := len(w.execSlots)
	return &plugin_pb.WorkerHeartbeat{
		WorkerId:            w.workerID,
		RunningWork:         running,
		DetectionSlotsUsed:  int32(detectUsed),
		DetectionSlotsTotal: int32(cap(w.detectSlots)),
		ExecutionSlotsUsed:  int32(execUsed),
		ExecutionSlotsTotal: int32(cap(w.execSlots)),
		QueuedJobsByType:    map[string]int32{},
		Metadata: map[string]string{
			"runtime": "plugin",
		},
	}
}
// sendHeartbeat pushes one load snapshot through the provided send
// callback; a nil callback is a no-op.
func (w *Worker) sendHeartbeat(send func(*plugin_pb.WorkerToAdminMessage) bool) {
	if send == nil {
		return
	}
	heartbeat := w.buildHeartbeat()
	send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_Heartbeat{Heartbeat: heartbeat},
	})
}
// setRunningWork registers or replaces the running-work entry for key.
// Blank keys and nil entries are ignored.
func (w *Worker) setRunningWork(key string, work *plugin_pb.RunningWork) {
	if work == nil || strings.TrimSpace(key) == "" {
		return
	}
	w.runningMu.Lock()
	defer w.runningMu.Unlock()
	w.runningWork[key] = work
}
// clearRunningWork drops the running-work entry for key, if present.
func (w *Worker) clearRunningWork(key string) {
	w.runningMu.Lock()
	defer w.runningMu.Unlock()
	delete(w.runningWork, key)
}
// updateRunningExecution refreshes progress for the running entry at key
// and forces its state to RUNNING. A blank stage keeps the previous
// stage text; an unknown key is ignored.
func (w *Worker) updateRunningExecution(key string, progress float64, stage string) {
	w.runningMu.Lock()
	defer w.runningMu.Unlock()
	running, found := w.runningWork[key]
	if !found || running == nil {
		return
	}
	running.ProgressPercent = progress
	if strings.TrimSpace(stage) != "" {
		running.Stage = stage
	}
	running.State = plugin_pb.JobState_JOB_STATE_RUNNING
}
// detectionSender forwards detection results from a job handler back to
// the admin stream, filling in the request id and job type when the
// handler leaves them blank.
type detectionSender struct {
	requestID string
	jobType   string
	// send enqueues a message on the worker's stream; it returns false
	// once the stream is closed.
	send func(*plugin_pb.WorkerToAdminMessage) bool
}
// SendProposals streams a batch of detection proposals to admin, stamping
// the sender's request id and job type on blank fields.
func (s *detectionSender) SendProposals(proposals *plugin_pb.DetectionProposals) error {
	if proposals == nil {
		return fmt.Errorf("detection proposals are nil")
	}
	if proposals.RequestId == "" {
		proposals.RequestId = s.requestID
	}
	if proposals.JobType == "" {
		proposals.JobType = s.jobType
	}
	delivered := s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_DetectionProposals{DetectionProposals: proposals},
	})
	if !delivered {
		return fmt.Errorf("stream closed")
	}
	return nil
}
// SendComplete reports the final detection outcome to admin, stamping the
// sender's request id and job type on blank fields.
func (s *detectionSender) SendComplete(complete *plugin_pb.DetectionComplete) error {
	if complete == nil {
		return fmt.Errorf("detection complete is nil")
	}
	if complete.RequestId == "" {
		complete.RequestId = s.requestID
	}
	if complete.JobType == "" {
		complete.JobType = s.jobType
	}
	delivered := s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_DetectionComplete{DetectionComplete: complete},
	})
	if !delivered {
		return fmt.Errorf("stream closed")
	}
	return nil
}
// SendActivity wraps a single activity event in a running progress update
// so admin can surface detector activity while detection is in flight.
// A missing creation time is stamped and an unspecified source defaults
// to the detector.
func (s *detectionSender) SendActivity(activity *plugin_pb.ActivityEvent) error {
	if activity == nil {
		return fmt.Errorf("detection activity is nil")
	}
	if activity.CreatedAt == nil {
		activity.CreatedAt = timestamppb.Now()
	}
	if activity.Source == plugin_pb.ActivitySource_ACTIVITY_SOURCE_UNSPECIFIED {
		activity.Source = plugin_pb.ActivitySource_ACTIVITY_SOURCE_DETECTOR
	}
	delivered := s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_JobProgressUpdate{JobProgressUpdate: &plugin_pb.JobProgressUpdate{
			RequestId:       s.requestID,
			JobType:         s.jobType,
			State:           plugin_pb.JobState_JOB_STATE_RUNNING,
			ProgressPercent: 0,
			Stage:           activity.Stage,
			Message:         activity.Message,
			Activities:      []*plugin_pb.ActivityEvent{activity},
			UpdatedAt:       timestamppb.Now(),
		}},
	})
	if !delivered {
		return fmt.Errorf("stream closed")
	}
	return nil
}
// executionSender forwards execution progress and completion from a job
// handler back to the admin stream, filling in blank identifiers.
type executionSender struct {
	requestID string
	jobID     string
	jobType   string
	// send enqueues a message on the worker's stream; it returns false
	// once the stream is closed.
	send func(*plugin_pb.WorkerToAdminMessage) bool
	// onProgress mirrors reported progress into the worker's running-work
	// tracking so heartbeats stay current; may be nil.
	onProgress func(progress float64, stage string)
}
// SendProgress forwards a handler progress update to admin, filling in
// blank identifiers and timestamps, and mirrors the progress into the
// worker's running-work tracking via onProgress.
func (s *executionSender) SendProgress(progress *plugin_pb.JobProgressUpdate) error {
	if progress == nil {
		return fmt.Errorf("job progress is nil")
	}
	if progress.RequestId == "" {
		progress.RequestId = s.requestID
	}
	if progress.JobId == "" {
		progress.JobId = s.jobID
	}
	if progress.JobType == "" {
		progress.JobType = s.jobType
	}
	if progress.UpdatedAt == nil {
		progress.UpdatedAt = timestamppb.Now()
	}
	if callback := s.onProgress; callback != nil {
		callback(progress.ProgressPercent, progress.Stage)
	}
	delivered := s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_JobProgressUpdate{JobProgressUpdate: progress},
	})
	if !delivered {
		return fmt.Errorf("stream closed")
	}
	return nil
}
// SendCompleted reports the final execution outcome to admin, filling in
// blank identifiers and stamping a completion time when absent.
func (s *executionSender) SendCompleted(completed *plugin_pb.JobCompleted) error {
	if completed == nil {
		return fmt.Errorf("job completed is nil")
	}
	if completed.RequestId == "" {
		completed.RequestId = s.requestID
	}
	if completed.JobId == "" {
		completed.JobId = s.jobID
	}
	if completed.JobType == "" {
		completed.JobType = s.jobType
	}
	if completed.CompletedAt == nil {
		completed.CompletedAt = timestamppb.Now()
	}
	delivered := s.send(&plugin_pb.WorkerToAdminMessage{
		Body: &plugin_pb.WorkerToAdminMessage_JobCompleted{JobCompleted: completed},
	})
	if !delivered {
		return fmt.Errorf("stream closed")
	}
	return nil
}
// generateWorkerID returns a short random id of the form "plugin-xxxxxx"
// (six hex digits). If the random source fails it falls back to a
// timestamp-based id so an id is always produced.
func generateWorkerID() string {
	var random [3]byte
	if _, err := rand.Read(random[:]); err != nil {
		return fmt.Sprintf("plugin-%d", time.Now().UnixNano())
	}
	return "plugin-" + hex.EncodeToString(random[:])
}
// setWorkCancel registers cancel under every non-blank key so a later
// cancel request can target the work by any of its identifiers.
func (w *Worker) setWorkCancel(cancel context.CancelFunc, keys ...string) {
	if cancel == nil {
		return
	}
	w.workCancelMu.Lock()
	defer w.workCancelMu.Unlock()
	for _, rawKey := range keys {
		if key := strings.TrimSpace(rawKey); key != "" {
			w.workCancel[key] = cancel
		}
	}
}
// clearWorkCancel removes the cancel registrations for every non-blank key.
func (w *Worker) clearWorkCancel(keys ...string) {
	w.workCancelMu.Lock()
	defer w.workCancelMu.Unlock()
	for _, rawKey := range keys {
		if key := strings.TrimSpace(rawKey); key != "" {
			delete(w.workCancel, key)
		}
	}
}
// cancelWork fires the cancel function registered for targetID and
// reports whether such a registration existed. The cancel function is
// invoked outside the lock.
func (w *Worker) cancelWork(targetID string) bool {
	targetID = strings.TrimSpace(targetID)
	if targetID == "" {
		return false
	}
	w.workCancelMu.Lock()
	cancel, found := w.workCancel[targetID]
	w.workCancelMu.Unlock()
	if !found || cancel == nil {
		return false
	}
	cancel()
	return true
}
// findHandler resolves the handler serving jobType along with its
// canonical job type. A blank job type is only accepted when the worker
// serves exactly one job type.
func (w *Worker) findHandler(jobType string) (JobHandler, string, error) {
	trimmed := strings.TrimSpace(jobType)
	if trimmed == "" {
		if len(w.handlers) != 1 {
			return nil, "", fmt.Errorf("job type is required when worker serves multiple job types")
		}
		// Exactly one handler: return it regardless of key.
		for _, handler := range w.handlers {
			resolvedJobType, err := resolveHandlerJobType(handler)
			return handler, resolvedJobType, err
		}
	}
	handler, found := w.handlers[normalizeJobTypeKey(trimmed)]
	if !found || handler == nil {
		return nil, "", fmt.Errorf("job type %q is not handled by this worker", trimmed)
	}
	resolvedJobType, err := resolveHandlerJobType(handler)
	if err != nil {
		return nil, "", err
	}
	return handler, resolvedJobType, nil
}
// resolveHandlerJobType extracts the job type a handler serves,
// preferring the descriptor's job type over the capability's. It fails
// when neither declares a non-blank job type.
func resolveHandlerJobType(handler JobHandler) (string, error) {
	if handler == nil {
		return "", fmt.Errorf("job handler is nil")
	}
	descriptor := handler.Descriptor()
	if descriptor != nil {
		if jobType := strings.TrimSpace(descriptor.JobType); jobType != "" {
			return jobType, nil
		}
	}
	capability := handler.Capability()
	if capability != nil {
		if jobType := strings.TrimSpace(capability.JobType); jobType != "" {
			return jobType, nil
		}
	}
	return "", fmt.Errorf("handler job type is not configured")
}
// normalizeJobTypeKey canonicalizes a job type for use as a handler map
// key: surrounding whitespace is trimmed and the result is lowercased.
func normalizeJobTypeKey(jobType string) string {
	trimmed := strings.TrimSpace(jobType)
	return strings.ToLower(trimmed)
}

View File

@@ -0,0 +1,599 @@
package pluginworker
import (
"context"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/plugin_pb"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// TestWorkerBuildHelloUsesConfiguredConcurrency verifies that the hello
// message advertises the worker-level concurrency limits, overriding any
// values declared on the handler capability.
func TestWorkerBuildHelloUsesConfiguredConcurrency(t *testing.T) {
	handler := &testJobHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:                 "vacuum",
			CanDetect:               true,
			CanExecute:              true,
			MaxDetectionConcurrency: 99,
			MaxExecutionConcurrency: 88,
		},
		descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
	}
	options := WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxDetectionConcurrency: 3,
		MaxExecutionConcurrency: 4,
	}
	worker, err := NewWorker(options)
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}
	hello := worker.buildHello()
	if hello == nil || len(hello.Capabilities) != 1 {
		t.Fatalf("expected one capability in hello")
	}
	capability := hello.Capabilities[0]
	if got := capability.MaxDetectionConcurrency; got != 3 {
		t.Fatalf("expected max_detection_concurrency=3, got=%d", got)
	}
	if got := capability.MaxExecutionConcurrency; got != 4 {
		t.Fatalf("expected max_execution_concurrency=4, got=%d", got)
	}
	if capability.JobType != "vacuum" {
		t.Fatalf("expected job type vacuum, got=%q", capability.JobType)
	}
}
func TestWorkerBuildHelloIncludesMultipleCapabilities(t *testing.T) {
worker, err := NewWorker(WorkerOptions{
AdminServer: "localhost:23646",
GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
Handlers: []JobHandler{
&testJobHandler{
capability: &plugin_pb.JobTypeCapability{JobType: "vacuum", CanDetect: true, CanExecute: true},
descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
},
&testJobHandler{
capability: &plugin_pb.JobTypeCapability{JobType: "volume_balance", CanDetect: true, CanExecute: true},
descriptor: &plugin_pb.JobTypeDescriptor{JobType: "volume_balance"},
},
},
MaxDetectionConcurrency: 2,
MaxExecutionConcurrency: 3,
})
if err != nil {
t.Fatalf("NewWorker error = %v", err)
}
hello := worker.buildHello()
if hello == nil || len(hello.Capabilities) != 2 {
t.Fatalf("expected two capabilities in hello")
}
found := map[string]bool{}
for _, capability := range hello.Capabilities {
found[capability.JobType] = true
if capability.MaxDetectionConcurrency != 2 {
t.Fatalf("expected max_detection_concurrency=2, got=%d", capability.MaxDetectionConcurrency)
}
if capability.MaxExecutionConcurrency != 3 {
t.Fatalf("expected max_execution_concurrency=3, got=%d", capability.MaxExecutionConcurrency)
}
}
if !found["vacuum"] || !found["volume_balance"] {
t.Fatalf("expected capabilities for vacuum and volume_balance, got=%v", found)
}
}
// TestWorkerCancelWorkByTargetID verifies that a cancel function
// registered under several keys can be fired via either key, and that
// unknown targets are rejected.
func TestWorkerCancelWorkByTargetID(t *testing.T) {
	worker, err := NewWorker(WorkerOptions{
		AdminServer:    "localhost:23646",
		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler: &testJobHandler{
			capability: &plugin_pb.JobTypeCapability{JobType: "vacuum"},
			descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		},
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	worker.setWorkCancel(cancel, "request-1", "job-1")
	if !worker.cancelWork("request-1") {
		t.Fatalf("expected cancel by request id to succeed")
	}
	deadline := time.After(100 * time.Millisecond)
	select {
	case <-ctx.Done():
	case <-deadline:
		t.Fatalf("expected context to be canceled")
	}
	if !worker.cancelWork("job-1") {
		t.Fatalf("expected cancel by job id to succeed")
	}
	if worker.cancelWork("unknown-target") {
		t.Fatalf("expected cancel unknown target to fail")
	}
}
// TestWorkerHandleCancelRequestAck checks the acknowledge responses for
// cancel requests: accepted when the target is registered, rejected when
// it is unknown.
func TestWorkerHandleCancelRequestAck(t *testing.T) {
	worker, err := NewWorker(WorkerOptions{
		AdminServer:    "localhost:23646",
		GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler: &testJobHandler{
			capability: &plugin_pb.JobTypeCapability{JobType: "vacuum"},
			descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		},
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}
	canceled := false
	worker.setWorkCancel(func() { canceled = true }, "job-42")
	// sendCancel injects one cancel request and returns the single reply.
	sendCancel := func(requestID, targetID string) *plugin_pb.WorkerToAdminMessage {
		t.Helper()
		var response *plugin_pb.WorkerToAdminMessage
		ok := worker.handleAdminMessageForTest(&plugin_pb.AdminToWorkerMessage{
			RequestId: requestID,
			Body: &plugin_pb.AdminToWorkerMessage_CancelRequest{
				CancelRequest: &plugin_pb.CancelRequest{TargetId: targetID},
			},
		}, func(msg *plugin_pb.WorkerToAdminMessage) bool {
			response = msg
			return true
		})
		if !ok {
			t.Fatalf("expected send callback to be invoked")
		}
		return response
	}
	response := sendCancel("cancel-req-1", "job-42")
	if !canceled {
		t.Fatalf("expected registered work cancel function to be called")
	}
	if response == nil || response.GetAcknowledge() == nil || !response.GetAcknowledge().Accepted {
		t.Fatalf("expected accepted acknowledge response, got=%+v", response)
	}
	response = sendCancel("cancel-req-2", "missing")
	if response == nil || response.GetAcknowledge() == nil || response.GetAcknowledge().Accepted {
		t.Fatalf("expected rejected acknowledge for missing target, got=%+v", response)
	}
}
func TestWorkerSchemaRequestRequiresJobTypeWhenMultipleHandlers(t *testing.T) {
worker, err := NewWorker(WorkerOptions{
AdminServer: "localhost:23646",
GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
Handlers: []JobHandler{
&testJobHandler{
capability: &plugin_pb.JobTypeCapability{JobType: "vacuum"},
descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
},
&testJobHandler{
capability: &plugin_pb.JobTypeCapability{JobType: "erasure_coding"},
descriptor: &plugin_pb.JobTypeDescriptor{JobType: "erasure_coding"},
},
},
})
if err != nil {
t.Fatalf("NewWorker error = %v", err)
}
var response *plugin_pb.WorkerToAdminMessage
ok := worker.handleAdminMessageForTest(&plugin_pb.AdminToWorkerMessage{
RequestId: "schema-req-1",
Body: &plugin_pb.AdminToWorkerMessage_RequestConfigSchema{
RequestConfigSchema: &plugin_pb.RequestConfigSchema{},
},
}, func(msg *plugin_pb.WorkerToAdminMessage) bool {
response = msg
return true
})
if !ok {
t.Fatalf("expected send callback to be invoked")
}
schema := response.GetConfigSchemaResponse()
if schema == nil || schema.Success {
t.Fatalf("expected schema error response, got=%+v", response)
}
}
// TestWorkerHandleDetectionQueuesWhenAtCapacity verifies that a second
// detection request is acknowledged immediately but its handler does not
// run until the single detection slot is released by the first request.
func TestWorkerHandleDetectionQueuesWhenAtCapacity(t *testing.T) {
	handler := &detectionQueueTestHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:    "vacuum",
			CanDetect:  true,
			CanExecute: false,
		},
		descriptor: &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		// Buffered so the handler never blocks the worker goroutines.
		detectEntered:  make(chan struct{}, 2),
		detectContinue: make(chan struct{}, 2),
	}
	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxDetectionConcurrency: 1,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}
	msgCh := make(chan *plugin_pb.WorkerToAdminMessage, 8)
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		msgCh <- msg
		return true
	}
	// sendDetection injects a detection request as if it came from admin.
	sendDetection := func(requestID string) {
		worker.handleAdminMessage(context.Background(), &plugin_pb.AdminToWorkerMessage{
			RequestId: requestID,
			Body: &plugin_pb.AdminToWorkerMessage_RunDetectionRequest{
				RunDetectionRequest: &plugin_pb.RunDetectionRequest{
					JobType: "vacuum",
				},
			},
		}, send)
	}
	sendDetection("detect-1")
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		ack := message.GetAcknowledge()
		return ack != nil && ack.RequestId == "detect-1" && ack.Accepted
	}, "detection acknowledge detect-1")
	// First detection is now inside the handler and holds the only slot.
	<-handler.detectEntered
	sendDetection("detect-2")
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		ack := message.GetAcknowledge()
		return ack != nil && ack.RequestId == "detect-2" && ack.Accepted
	}, "detection acknowledge detect-2")
	// While the slot is held, no further messages (in particular no
	// completion for detect-2) may appear.
	select {
	case unexpected := <-msgCh:
		t.Fatalf("did not expect detection completion before slot is available, got=%+v", unexpected)
	case <-time.After(100 * time.Millisecond):
	}
	// Release the first detection; it completes and frees the slot.
	handler.detectContinue <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		complete := message.GetDetectionComplete()
		return complete != nil && complete.RequestId == "detect-1" && complete.Success
	}, "detection complete detect-1")
	// The queued detection now enters the handler; release it as well.
	<-handler.detectEntered
	handler.detectContinue <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		complete := message.GetDetectionComplete()
		return complete != nil && complete.RequestId == "detect-2" && complete.Success
	}, "detection complete detect-2")
}
// TestWorkerHeartbeatReflectsActiveDetectionLoad verifies that heartbeats
// report a used detection slot and the running work entry while a
// detection is in flight, and report an idle worker after it completes.
func TestWorkerHeartbeatReflectsActiveDetectionLoad(t *testing.T) {
	handler := &detectionQueueTestHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:    "vacuum",
			CanDetect:  true,
			CanExecute: false,
		},
		descriptor:     &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		detectEntered:  make(chan struct{}, 1),
		detectContinue: make(chan struct{}, 1),
	}
	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxDetectionConcurrency: 1,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}
	msgCh := make(chan *plugin_pb.WorkerToAdminMessage, 16)
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		msgCh <- msg
		return true
	}
	requestID := "detect-heartbeat-1"
	worker.handleAdminMessage(context.Background(), &plugin_pb.AdminToWorkerMessage{
		RequestId: requestID,
		Body: &plugin_pb.AdminToWorkerMessage_RunDetectionRequest{
			RunDetectionRequest: &plugin_pb.RunDetectionRequest{
				JobType: "vacuum",
			},
		},
	}, send)
	// Wait until the handler is running and holds the detection slot.
	<-handler.detectEntered
	// A heartbeat must now report the used slot and the running detection.
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil &&
			heartbeat.DetectionSlotsUsed > 0 &&
			heartbeatHasRunningWork(heartbeat, requestID, plugin_pb.WorkKind_WORK_KIND_DETECTION)
	}, "active detection heartbeat")
	// Release the handler and wait for the successful completion.
	handler.detectContinue <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		complete := message.GetDetectionComplete()
		return complete != nil && complete.RequestId == requestID && complete.Success
	}, "detection complete")
	// After completion the heartbeat must report an idle worker again.
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil && heartbeat.DetectionSlotsUsed == 0 &&
			!heartbeatHasRunningWork(heartbeat, requestID, plugin_pb.WorkKind_WORK_KIND_DETECTION)
	}, "idle detection heartbeat")
}
// TestWorkerHeartbeatReflectsActiveExecutionLoad verifies that heartbeats
// report a used execution slot and the running job (keyed by job id)
// while an execution is in flight, and report an idle worker afterwards.
func TestWorkerHeartbeatReflectsActiveExecutionLoad(t *testing.T) {
	handler := &executionHeartbeatTestHandler{
		capability: &plugin_pb.JobTypeCapability{
			JobType:    "vacuum",
			CanDetect:  false,
			CanExecute: true,
		},
		descriptor:     &plugin_pb.JobTypeDescriptor{JobType: "vacuum"},
		executeEntered: make(chan struct{}, 1),
		executeDone:    make(chan struct{}, 1),
	}
	worker, err := NewWorker(WorkerOptions{
		AdminServer:             "localhost:23646",
		GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		Handler:                 handler,
		MaxExecutionConcurrency: 1,
	})
	if err != nil {
		t.Fatalf("NewWorker error = %v", err)
	}
	msgCh := make(chan *plugin_pb.WorkerToAdminMessage, 16)
	send := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		msgCh <- msg
		return true
	}
	requestID := "exec-heartbeat-1"
	jobID := "job-heartbeat-1"
	worker.handleAdminMessage(context.Background(), &plugin_pb.AdminToWorkerMessage{
		RequestId: requestID,
		Body: &plugin_pb.AdminToWorkerMessage_ExecuteJobRequest{
			ExecuteJobRequest: &plugin_pb.ExecuteJobRequest{
				Job: &plugin_pb.JobSpec{
					JobId:   jobID,
					JobType: "vacuum",
				},
			},
		},
	}, send)
	// Wait until the handler is running and holds the execution slot.
	<-handler.executeEntered
	// A heartbeat must now report the used slot and the running job.
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil &&
			heartbeat.ExecutionSlotsUsed > 0 &&
			heartbeatHasRunningWork(heartbeat, jobID, plugin_pb.WorkKind_WORK_KIND_EXECUTION)
	}, "active execution heartbeat")
	// Release the handler and wait for the successful completion.
	handler.executeDone <- struct{}{}
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		completed := message.GetJobCompleted()
		return completed != nil && completed.RequestId == requestID && completed.Success
	}, "execution complete")
	// After completion the heartbeat must report an idle worker again.
	waitForWorkerMessage(t, msgCh, func(message *plugin_pb.WorkerToAdminMessage) bool {
		heartbeat := message.GetHeartbeat()
		return heartbeat != nil && heartbeat.ExecutionSlotsUsed == 0 &&
			!heartbeatHasRunningWork(heartbeat, jobID, plugin_pb.WorkKind_WORK_KIND_EXECUTION)
	}, "idle execution heartbeat")
}
// testJobHandler is a minimal JobHandler stub whose Detect and Execute
// are no-ops; tests use it when only capability/descriptor metadata
// matters.
type testJobHandler struct {
	capability *plugin_pb.JobTypeCapability
	descriptor *plugin_pb.JobTypeDescriptor
}

func (h *testJobHandler) Capability() *plugin_pb.JobTypeCapability {
	return h.capability
}

func (h *testJobHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return h.descriptor
}

// Detect is a no-op; it reports success without sending anything.
func (h *testJobHandler) Detect(context.Context, *plugin_pb.RunDetectionRequest, DetectionSender) error {
	return nil
}

// Execute is a no-op; it reports success without sending anything.
func (h *testJobHandler) Execute(context.Context, *plugin_pb.ExecuteJobRequest, ExecutionSender) error {
	return nil
}
// detectionQueueTestHandler is a JobHandler whose Detect blocks until the
// test signals detectContinue, letting tests pin a detection slot while
// observing the worker's behavior.
type detectionQueueTestHandler struct {
	capability *plugin_pb.JobTypeCapability
	descriptor *plugin_pb.JobTypeDescriptor
	// detectEntered receives one (non-blocking) signal each time Detect
	// begins running.
	detectEntered chan struct{}
	// detectContinue releases one blocked Detect call.
	detectContinue chan struct{}
}

func (h *detectionQueueTestHandler) Capability() *plugin_pb.JobTypeCapability {
	return h.capability
}

func (h *detectionQueueTestHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return h.descriptor
}

// Detect announces entry, waits for the test's release signal (or
// cancellation), then reports a successful completion.
func (h *detectionQueueTestHandler) Detect(ctx context.Context, _ *plugin_pb.RunDetectionRequest, sender DetectionSender) error {
	// Non-blocking notify so a full channel never deadlocks the worker.
	select {
	case h.detectEntered <- struct{}{}:
	default:
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-h.detectContinue:
	}
	return sender.SendComplete(&plugin_pb.DetectionComplete{
		Success: true,
	})
}

// Execute is unused by these tests.
func (h *detectionQueueTestHandler) Execute(context.Context, *plugin_pb.ExecuteJobRequest, ExecutionSender) error {
	return nil
}
// executionHeartbeatTestHandler is a JobHandler stub whose Execute blocks
// until the test releases it, letting tests observe heartbeats that are
// emitted while execution work is in flight.
type executionHeartbeatTestHandler struct {
	capability     *plugin_pb.JobTypeCapability // canned value returned by Capability
	descriptor     *plugin_pb.JobTypeDescriptor // canned value returned by Descriptor
	executeEntered chan struct{}                // receives a signal when Execute starts
	executeDone    chan struct{}                // test sends here to let Execute finish
}

// Capability returns the capability configured by the test.
func (h *executionHeartbeatTestHandler) Capability() *plugin_pb.JobTypeCapability {
	return h.capability
}

// Descriptor returns the descriptor configured by the test.
func (h *executionHeartbeatTestHandler) Descriptor() *plugin_pb.JobTypeDescriptor {
	return h.descriptor
}

// Detect is a no-op; this handler only exercises the execution path.
func (h *executionHeartbeatTestHandler) Detect(context.Context, *plugin_pb.RunDetectionRequest, DetectionSender) error {
	return nil
}

// Execute signals entry (without blocking, in case nobody is listening), waits
// until the test releases it or ctx is canceled, then reports a successful
// completion echoing the job id and type from the request.
func (h *executionHeartbeatTestHandler) Execute(ctx context.Context, request *plugin_pb.ExecuteJobRequest, sender ExecutionSender) error {
	// Best-effort entry notification.
	select {
	case h.executeEntered <- struct{}{}:
	default:
	}
	// Block until released or canceled.
	select {
	case <-h.executeDone:
	case <-ctx.Done():
		return ctx.Err()
	}
	// NOTE(review): assumes the admin always populates request.Job — holds for
	// the tests in this file; confirm before reusing this stub elsewhere.
	job := request.Job
	return sender.SendCompleted(&plugin_pb.JobCompleted{
		JobId:   job.JobId,
		JobType: job.JobType,
		Success: true,
	})
}
// recvWorkerMessage returns the next worker message from msgCh, failing the
// test if nothing arrives within two seconds.
func recvWorkerMessage(t *testing.T, msgCh <-chan *plugin_pb.WorkerToAdminMessage) *plugin_pb.WorkerToAdminMessage {
	t.Helper()
	select {
	case <-time.After(2 * time.Second):
		t.Fatal("timed out waiting for worker message")
		return nil
	case message := <-msgCh:
		return message
	}
}
// expectDetectionAck fails the test unless message is an acknowledge for
// requestID with Accepted set.
func expectDetectionAck(t *testing.T, message *plugin_pb.WorkerToAdminMessage, requestID string) {
	t.Helper()
	ack := message.GetAcknowledge()
	switch {
	case ack == nil:
		t.Fatalf("expected acknowledge for request %q, got=%+v", requestID, message)
	case ack.RequestId != requestID:
		t.Fatalf("expected acknowledge request_id=%q, got=%q", requestID, ack.RequestId)
	case !ack.Accepted:
		t.Fatalf("expected acknowledge accepted for request %q, got=%+v", requestID, ack)
	}
}
// expectDetectionCompleteSuccess fails the test unless message is a detection
// complete for requestID with Success set.
func expectDetectionCompleteSuccess(t *testing.T, message *plugin_pb.WorkerToAdminMessage, requestID string) {
	t.Helper()
	complete := message.GetDetectionComplete()
	switch {
	case complete == nil:
		t.Fatalf("expected detection complete for request %q, got=%+v", requestID, message)
	case complete.RequestId != requestID:
		t.Fatalf("expected detection complete request_id=%q, got=%q", requestID, complete.RequestId)
	case !complete.Success:
		t.Fatalf("expected successful detection complete for request %q, got=%+v", requestID, complete)
	}
}
// waitForWorkerMessage drains msgCh until a message satisfies predicate and
// returns it, failing the test if none does within three seconds. description
// labels the awaited condition in the timeout failure message.
func waitForWorkerMessage(
	t *testing.T,
	msgCh <-chan *plugin_pb.WorkerToAdminMessage,
	predicate func(*plugin_pb.WorkerToAdminMessage) bool,
	description string,
) *plugin_pb.WorkerToAdminMessage {
	t.Helper()
	deadline := time.NewTimer(3 * time.Second)
	defer deadline.Stop()
	for {
		select {
		case <-deadline.C:
			t.Fatalf("timed out waiting for %s", description)
			return nil
		case msg := <-msgCh:
			if !predicate(msg) {
				continue // not the message we want; keep draining
			}
			return msg
		}
	}
}
// heartbeatHasRunningWork reports whether heartbeat lists an in-flight work
// entry matching both workID and kind. A nil heartbeat or an empty workID
// never matches; nil entries in RunningWork are skipped.
func heartbeatHasRunningWork(heartbeat *plugin_pb.WorkerHeartbeat, workID string, kind plugin_pb.WorkKind) bool {
	if heartbeat == nil || workID == "" {
		return false
	}
	for _, entry := range heartbeat.RunningWork {
		if entry != nil && entry.WorkId == workID && entry.Kind == kind {
			return true
		}
	}
	return false
}
// handleAdminMessageForTest dispatches message through handleAdminMessage and
// reports whether the worker attempted to send at least one reply via send.
func (w *Worker) handleAdminMessageForTest(
	message *plugin_pb.AdminToWorkerMessage,
	send func(*plugin_pb.WorkerToAdminMessage) bool,
) bool {
	replied := false
	forward := func(msg *plugin_pb.WorkerToAdminMessage) bool {
		replied = true
		return send(msg)
	}
	w.handleAdminMessage(context.Background(), message, forward)
	return replied
}

View File

@@ -269,11 +269,24 @@ func (dn *DataNode) ToInfo() (info DataNodeInfo) {
func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo { func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
m := &master_pb.DataNodeInfo{ m := &master_pb.DataNodeInfo{
Id: string(dn.Id()), Id: string(dn.Id()),
DiskInfos: make(map[string]*master_pb.DiskInfo), // Start from disk usage counters so empty disks are still represented
// even when there are no volumes/EC shards on this data node yet.
DiskInfos: dn.diskUsages.ToDiskInfo(),
GrpcPort: uint32(dn.GrpcPort), GrpcPort: uint32(dn.GrpcPort),
Address: dn.Url(), // ip:port for connecting to the volume server Address: dn.Url(), // ip:port for connecting to the volume server
} }
if m.DiskInfos == nil {
m.DiskInfos = make(map[string]*master_pb.DiskInfo)
}
for diskType, diskInfo := range m.DiskInfos {
if diskInfo == nil {
m.DiskInfos[diskType] = &master_pb.DiskInfo{Type: diskType}
continue
}
diskInfo.Type = diskType
}
for _, c := range dn.Children() { for _, c := range dn.Children() {
disk := c.(*Disk) disk := c.(*Disk)
m.DiskInfos[string(disk.Id())] = disk.ToDiskInfo() m.DiskInfos[string(disk.Id())] = disk.ToDiskInfo()

View File

@@ -165,6 +165,29 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
} }
// TestDataNodeToDataNodeInfo_IncludeEmptyDiskFromUsage verifies that
// ToDataNodeInfo surfaces a disk entry sourced from the node's disk usage
// counters even when the node has no mounted volumes, and that the counter
// values (maxVolumeCount) survive the conversion.
func TestDataNodeToDataNodeInfo_IncludeEmptyDiskFromUsage(t *testing.T) {
	dn := NewDataNode("node-1")
	dn.Ip = "127.0.0.1"
	dn.Port = 18080
	dn.GrpcPort = 28080
	// Simulate a node that has slot counters but no mounted volumes yet.
	usage := dn.diskUsages.getOrCreateDisk(types.HardDriveType)
	usage.maxVolumeCount = 8
	info := dn.ToDataNodeInfo()
	// The hard-drive disk type maps to the empty-string key in DiskInfos.
	diskInfo, found := info.DiskInfos[""]
	if !found {
		t.Fatalf("expected default disk entry for empty node")
	}
	if diskInfo.MaxVolumeCount != 8 {
		t.Fatalf("unexpected max volume count: got=%d want=8", diskInfo.MaxVolumeCount)
	}
	if len(diskInfo.VolumeInfos) != 0 {
		t.Fatalf("expected no volumes for empty disk, got=%d", len(diskInfo.VolumeInfos))
	}
}
func assert(t *testing.T, message string, actual, expected int) { func assert(t *testing.T, message string, actual, expected int) {
if actual != expected { if actual != expected {
t.Fatalf("unexpected %s: %d, expected: %d", message, actual, expected) t.Fatalf("unexpected %s: %d, expected: %d", message, actual, expected)