Admin UI add maintenance menu (#6944)

* add ui for maintenance

* valid config loading. fix workers page.

* refactor

* grpc between admin and workers

* add a long-running bidirectional grpc call between admin and worker
* use the grpc call to heartbeat
* use the grpc call to communicate
* worker can remove the http client
* admin uses http port + 10000 as its default grpc port

* one task one package

* handles connection failures gracefully with exponential backoff

* grpc with insecure tls

* grpc with optional tls

* fix detecting tls

* change time config from nano seconds to seconds

* add tasks with 3 interfaces

* compiles, reducing hard-coded values

* remove a couple of tasks

* remove hard coded references

* reduce hard coded values

* remove hard coded values

* remove hard coded from templ

* refactor maintenance package

* fix import cycle

* simplify

* simplify

* auto register

* auto register factory

* auto register task types

* self register types

* refactor

* simplify

* remove one task

* register ui

* lazy init executor factories

* use registered task types

* DefaultWorkerConfig remove hard coded task types

* remove more hard coded

* implement get maintenance task

* dynamic task configuration

* "System Settings" should only have system level settings

* adjust menu for tasks

* ensure menu not collapsed

* render job configuration well

* use templ for ui of task configuration

* fix ordering

* fix bugs

* saving duration in seconds

* use value and unit for duration

* Delete WORKER_REFACTORING_PLAN.md

* Delete maintenance.json

* Delete custom_worker_example.go

* remove address from workers

* remove old code from ec task

* remove creating collection button

* reconnect with exponential backoff

* worker use security.toml

* start admin server with tls info from security.toml

* fix "weed admin" cli description
This commit is contained in:
Chris Lu
2025-07-06 13:57:02 -07:00
committed by GitHub
parent 302e62d480
commit aa66852304
76 changed files with 18218 additions and 206 deletions

View File

@@ -0,0 +1,82 @@
package balance
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Task implements balance operation to redistribute volumes across volume servers
type Task struct {
	*tasks.BaseTask
	server     string // volume server the task operates against
	volumeID   uint32 // volume to move during the balance
	collection string // collection the volume belongs to (may be empty)
}
// NewTask creates a new balance task instance for the given volume on the
// given server.
func NewTask(server string, volumeID uint32, collection string) *Task {
	return &Task{
		BaseTask:   tasks.NewBaseTask(types.TaskTypeBalance),
		server:     server,
		volumeID:   volumeID,
		collection: collection,
	}
}
// Execute executes the balance task.
//
// The actual balance operation is not implemented here; this runs a fixed
// sequence of simulated steps, reporting progress after each one. Execution
// stops early with an error if the task is cancelled.
func (t *Task) Execute(params types.TaskParams) error {
	glog.Infof("Starting balance task for volume %d on server %s (collection: %s)", t.volumeID, t.server, t.collection)

	// Simulate balance operation with progress updates
	steps := []struct {
		name     string
		duration time.Duration
		progress float64
	}{
		{"Analyzing cluster state", 2 * time.Second, 15},
		{"Identifying optimal placement", 3 * time.Second, 35},
		{"Moving volume data", 6 * time.Second, 75},
		{"Updating cluster metadata", 2 * time.Second, 95},
		{"Verifying balance", 1 * time.Second, 100},
	}

	for _, step := range steps {
		if t.IsCancelled() {
			return fmt.Errorf("balance task cancelled")
		}

		glog.V(1).Infof("Balance task step: %s", step.name)
		t.SetProgress(step.progress)

		// Fix: previously each step slept for its full duration in one call,
		// so a cancellation could go unnoticed for up to 6 seconds. Sleep in
		// short slices and poll for cancellation instead.
		if err := t.sleepCancellable(step.duration); err != nil {
			return err
		}
	}

	glog.Infof("Balance task completed for volume %d on server %s", t.volumeID, t.server)
	return nil
}

// sleepCancellable waits for d while polling IsCancelled roughly every
// 100ms; it returns an error as soon as cancellation is observed.
func (t *Task) sleepCancellable(d time.Duration) error {
	const tick = 100 * time.Millisecond
	for remaining := d; remaining > 0; remaining -= tick {
		if t.IsCancelled() {
			return fmt.Errorf("balance task cancelled")
		}
		if remaining < tick {
			time.Sleep(remaining)
		} else {
			time.Sleep(tick)
		}
	}
	return nil
}
// Validate validates the task parameters; both a volume ID and a server
// address are required.
func (t *Task) Validate(params types.TaskParams) error {
	switch {
	case params.VolumeID == 0:
		return fmt.Errorf("volume_id is required")
	case params.Server == "":
		return fmt.Errorf("server is required")
	default:
		return nil
	}
}
// EstimateTime estimates the time needed for the task. The estimate is
// currently a flat base time; it could later be adjusted based on volume
// size or cluster state.
func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
	return 35 * time.Second
}

View File

@@ -0,0 +1,171 @@
package balance
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// BalanceDetector implements TaskDetector for balance tasks
type BalanceDetector struct {
	enabled          bool
	threshold        float64 // Imbalance threshold (0.1 = 10%)
	minCheckInterval time.Duration // minimum time between two scans
	minVolumeCount   int // skip clusters with fewer volumes than this
	lastCheck        time.Time // time of the last scan (zero if never scanned)
}

// Compile-time interface assertions
var (
	_ types.TaskDetector = (*BalanceDetector)(nil)
)
// NewBalanceDetector creates a balance detector with default settings:
// enabled, a 10% imbalance threshold, hourly checks, and a 10-volume
// minimum cluster size. lastCheck starts at the zero time, so the first
// scan is allowed immediately.
func NewBalanceDetector() *BalanceDetector {
	d := &BalanceDetector{}
	d.enabled = true
	d.threshold = 0.1 // 10% imbalance threshold
	d.minCheckInterval = 1 * time.Hour
	d.minVolumeCount = 10 // Don't balance small clusters
	return d
}
// GetTaskType returns the task type handled by this detector.
func (d *BalanceDetector) GetTaskType() types.TaskType {
	return types.TaskTypeBalance
}
// ScanForTasks checks if cluster balance is needed.
//
// It returns at most one TaskDetectionResult describing the detected
// imbalance, or (nil, nil) when the detector is disabled, ran too recently,
// the cluster is too small, or the cluster is already balanced.
// volumeMetrics appears to carry one entry per volume with its hosting
// server — TODO confirm against the caller.
func (d *BalanceDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
	if !d.enabled {
		return nil, nil
	}
	glog.V(2).Infof("Scanning for balance tasks...")
	// Don't check too frequently
	if time.Since(d.lastCheck) < d.minCheckInterval {
		return nil, nil
	}
	// NOTE(review): lastCheck is updated even when the scan bails out below
	// (e.g. too-small cluster), so those cases also reset the interval timer.
	d.lastCheck = time.Now()
	// Skip if cluster is too small
	if len(volumeMetrics) < d.minVolumeCount {
		glog.V(2).Infof("Cluster too small for balance (%d volumes < %d minimum)", len(volumeMetrics), d.minVolumeCount)
		return nil, nil
	}
	// Analyze volume distribution across servers
	serverVolumeCounts := make(map[string]int)
	for _, metric := range volumeMetrics {
		serverVolumeCounts[metric.Server]++
	}
	if len(serverVolumeCounts) < 2 {
		glog.V(2).Infof("Not enough servers for balance (%d servers)", len(serverVolumeCounts))
		return nil, nil
	}
	// Calculate balance metrics
	totalVolumes := len(volumeMetrics)
	avgVolumesPerServer := float64(totalVolumes) / float64(len(serverVolumeCounts))
	maxVolumes := 0
	minVolumes := totalVolumes
	maxServer := ""
	minServer := ""
	// NOTE(review): map iteration order is random, so ties for max/min pick a
	// nondeterministic server; only the names in the reason string are affected.
	for server, count := range serverVolumeCounts {
		if count > maxVolumes {
			maxVolumes = count
			maxServer = server
		}
		if count < minVolumes {
			minVolumes = count
			minServer = server
		}
	}
	// Check if imbalance exceeds threshold
	imbalanceRatio := float64(maxVolumes-minVolumes) / avgVolumesPerServer
	if imbalanceRatio <= d.threshold {
		glog.V(2).Infof("Cluster is balanced (imbalance ratio: %.2f <= %.2f)", imbalanceRatio, d.threshold)
		return nil, nil
	}
	// Create balance task
	reason := fmt.Sprintf("Cluster imbalance detected: %.1f%% (max: %d on %s, min: %d on %s, avg: %.1f)",
		imbalanceRatio*100, maxVolumes, maxServer, minVolumes, minServer, avgVolumesPerServer)
	task := &types.TaskDetectionResult{
		TaskType:   types.TaskTypeBalance,
		Priority:   types.TaskPriorityNormal,
		Reason:     reason,
		ScheduleAt: time.Now(),
		Parameters: map[string]interface{}{
			"imbalance_ratio":        imbalanceRatio,
			"threshold":              d.threshold,
			"max_volumes":            maxVolumes,
			"min_volumes":            minVolumes,
			"avg_volumes_per_server": avgVolumesPerServer,
			"max_server":             maxServer,
			"min_server":             minServer,
			"total_servers":          len(serverVolumeCounts),
		},
	}
	glog.V(1).Infof("🔄 Found balance task: %s", reason)
	return []*types.TaskDetectionResult{task}, nil
}
// ScanInterval returns how often to scan (shared with the min-check interval).
func (d *BalanceDetector) ScanInterval() time.Duration {
	return d.minCheckInterval
}

// IsEnabled returns whether the detector is enabled.
func (d *BalanceDetector) IsEnabled() bool {
	return d.enabled
}

// SetEnabled sets whether the detector is enabled.
// NOTE(review): these setters are unsynchronized; confirm they are only
// called from a single configuration goroutine.
func (d *BalanceDetector) SetEnabled(enabled bool) {
	d.enabled = enabled
	glog.V(1).Infof("🔄 Balance detector enabled: %v", enabled)
}

// SetThreshold sets the imbalance threshold (fraction, e.g. 0.1 = 10%).
func (d *BalanceDetector) SetThreshold(threshold float64) {
	d.threshold = threshold
	glog.V(1).Infof("🔄 Balance threshold set to: %.1f%%", threshold*100)
}

// SetMinCheckInterval sets the minimum time between balance checks.
func (d *BalanceDetector) SetMinCheckInterval(interval time.Duration) {
	d.minCheckInterval = interval
	glog.V(1).Infof("🔄 Balance check interval set to: %v", interval)
}

// SetMinVolumeCount sets the minimum volume count for balance operations.
func (d *BalanceDetector) SetMinVolumeCount(count int) {
	d.minVolumeCount = count
	glog.V(1).Infof("🔄 Balance minimum volume count set to: %d", count)
}

// GetThreshold returns the current imbalance threshold.
func (d *BalanceDetector) GetThreshold() float64 {
	return d.threshold
}

// GetMinCheckInterval returns the minimum check interval.
func (d *BalanceDetector) GetMinCheckInterval() time.Duration {
	return d.minCheckInterval
}

// GetMinVolumeCount returns the minimum volume count.
func (d *BalanceDetector) GetMinVolumeCount() int {
	return d.minVolumeCount
}

View File

@@ -0,0 +1,81 @@
package balance
import (
	"fmt"
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Factory creates balance task instances
type Factory struct {
	*tasks.BaseTaskFactory
}

// NewFactory creates a new balance task factory, registering the task type,
// its capability tags, and a human-readable description with the base factory.
func NewFactory() *Factory {
	return &Factory{
		BaseTaskFactory: tasks.NewBaseTaskFactory(
			types.TaskTypeBalance,
			[]string{"balance", "storage", "optimization"},
			"Balance data across volume servers for optimal performance",
		),
	}
}
// Create validates the parameters and builds a new balance task instance,
// pre-populating its estimated duration.
func (f *Factory) Create(params types.TaskParams) (types.TaskInterface, error) {
	// Reject incomplete parameters up front.
	switch {
	case params.VolumeID == 0:
		return nil, fmt.Errorf("volume_id is required")
	case params.Server == "":
		return nil, fmt.Errorf("server is required")
	}

	task := NewTask(params.Server, params.VolumeID, params.Collection)
	task.SetEstimatedDuration(task.EstimateTime(params))
	return task, nil
}
// Shared detector and scheduler instances, created once on first use.
var (
	sharedDetector  *BalanceDetector
	sharedScheduler *BalanceScheduler
	sharedInit      sync.Once
)

// getSharedInstances returns the shared detector and scheduler instances.
//
// Fix: the previous unsynchronized nil checks were racy — two goroutines
// calling concurrently could each construct (and hand out) different
// instances. sync.Once guarantees exactly one pair is ever created.
func getSharedInstances() (*BalanceDetector, *BalanceScheduler) {
	sharedInit.Do(func() {
		sharedDetector = NewBalanceDetector()
		sharedScheduler = NewBalanceScheduler()
	})
	return sharedDetector, sharedScheduler
}

// GetSharedInstances returns the shared detector and scheduler instances (public access)
func GetSharedInstances() (*BalanceDetector, *BalanceScheduler) {
	return getSharedInstances()
}
// Auto-register this task when the package is imported.
//
// Registration happens in three places, all sharing the same detector and
// scheduler instances so that configuration changes made through any one
// registry are reflected everywhere: the task factory (creates task
// instances), the types registry (detection/scheduling), and the UI registry.
func init() {
	factory := NewFactory()
	tasks.AutoRegister(types.TaskTypeBalance, factory)

	// Get shared instances for all registrations
	detector, scheduler := getSharedInstances()

	// Register with types registry
	tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
		registry.RegisterTask(detector, scheduler)
	})

	// Register with UI registry using the same instances
	tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
		RegisterUI(uiRegistry, detector, scheduler)
	})
}

View File

@@ -0,0 +1,197 @@
package balance
import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// BalanceScheduler implements TaskScheduler for balance tasks
type BalanceScheduler struct {
	enabled            bool
	maxConcurrent      int // max balance tasks allowed to run at once
	minInterval        time.Duration // minimum time between scheduled balance runs
	lastScheduled      map[string]time.Time // track when we last scheduled a balance for each task type
	minServerCount     int
	moveDuringOffHours bool // NOTE(review): stored but not consulted in CanScheduleNow — confirm where it is enforced
	offHoursStart      string // e.g. "23:00"
	offHoursEnd        string // e.g. "06:00"
}

// Compile-time interface assertions
var (
	_ types.TaskScheduler = (*BalanceScheduler)(nil)
)
// NewBalanceScheduler creates a new balance scheduler with conservative
// defaults: enabled, a single concurrent task, a 6-hour minimum interval,
// at least 3 servers, and off-hours movement between 23:00 and 06:00.
func NewBalanceScheduler() *BalanceScheduler {
	s := &BalanceScheduler{
		lastScheduled: make(map[string]time.Time),
	}
	s.enabled = true
	s.maxConcurrent = 1 // Only run one balance at a time
	s.minInterval = 6 * time.Hour
	s.minServerCount = 3
	s.moveDuringOffHours = true
	s.offHoursStart = "23:00"
	s.offHoursEnd = "06:00"
	return s
}
// GetTaskType returns the task type handled by this scheduler.
func (s *BalanceScheduler) GetTaskType() types.TaskType {
	return types.TaskTypeBalance
}
// CanScheduleNow determines if a balance task can be scheduled.
//
// It checks, in order: scheduler enablement, the concurrent-balance limit,
// the minimum interval since the last scheduled balance, and whether any
// available worker advertises the balance capability.
//
// NOTE(review): despite its query-like name, this method has a side effect —
// it records time.Now() in lastScheduled when all checks pass, so callers
// must actually schedule the task whenever it returns true.
// NOTE(review): the off-hours settings are not consulted here; confirm
// whether off-hours enforcement happens elsewhere.
func (s *BalanceScheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
	if !s.enabled {
		return false
	}
	// Count running balance tasks
	runningBalanceCount := 0
	for _, runningTask := range runningTasks {
		if runningTask.Type == types.TaskTypeBalance {
			runningBalanceCount++
		}
	}
	// Check concurrency limit
	if runningBalanceCount >= s.maxConcurrent {
		glog.V(3).Infof("⏸️ Balance task blocked: too many running (%d >= %d)", runningBalanceCount, s.maxConcurrent)
		return false
	}
	// Check minimum interval between balance operations
	if lastTime, exists := s.lastScheduled["balance"]; exists {
		if time.Since(lastTime) < s.minInterval {
			timeLeft := s.minInterval - time.Since(lastTime)
			glog.V(3).Infof("⏸️ Balance task blocked: too soon (wait %v)", timeLeft)
			return false
		}
	}
	// Check if we have available workers with the balance capability
	availableWorkerCount := 0
	for _, worker := range availableWorkers {
		for _, capability := range worker.Capabilities {
			if capability == types.TaskTypeBalance {
				availableWorkerCount++
				break
			}
		}
	}
	if availableWorkerCount == 0 {
		glog.V(3).Infof("⏸️ Balance task blocked: no available workers")
		return false
	}
	// All checks passed - can schedule
	s.lastScheduled["balance"] = time.Now()
	glog.V(2).Infof("✅ Balance task can be scheduled (running: %d/%d, workers: %d)",
		runningBalanceCount, s.maxConcurrent, availableWorkerCount)
	return true
}
// GetPriority returns the priority for balance tasks.
func (s *BalanceScheduler) GetPriority(task *types.Task) types.TaskPriority {
	// Balance is typically normal priority - not urgent but important for optimization
	return types.TaskPriorityNormal
}

// GetMaxConcurrent returns the maximum concurrent balance tasks.
func (s *BalanceScheduler) GetMaxConcurrent() int {
	return s.maxConcurrent
}

// GetDefaultRepeatInterval returns the default interval to wait before repeating balance tasks.
func (s *BalanceScheduler) GetDefaultRepeatInterval() time.Duration {
	return s.minInterval
}

// IsEnabled returns whether the scheduler is enabled.
func (s *BalanceScheduler) IsEnabled() bool {
	return s.enabled
}

// SetEnabled sets whether the scheduler is enabled.
// NOTE(review): the setters below are unsynchronized; confirm they are only
// called from the configuration path.
func (s *BalanceScheduler) SetEnabled(enabled bool) {
	s.enabled = enabled
	glog.V(1).Infof("🔄 Balance scheduler enabled: %v", enabled)
}

// SetMaxConcurrent sets the maximum concurrent balance tasks.
func (s *BalanceScheduler) SetMaxConcurrent(max int) {
	s.maxConcurrent = max
	glog.V(1).Infof("🔄 Balance max concurrent set to: %d", max)
}

// SetMinInterval sets the minimum interval between balance operations.
func (s *BalanceScheduler) SetMinInterval(interval time.Duration) {
	s.minInterval = interval
	glog.V(1).Infof("🔄 Balance minimum interval set to: %v", interval)
}

// GetLastScheduled returns when we last scheduled this task type
// (the zero time if never scheduled).
func (s *BalanceScheduler) GetLastScheduled(taskKey string) time.Time {
	if lastTime, exists := s.lastScheduled[taskKey]; exists {
		return lastTime
	}
	return time.Time{}
}

// SetLastScheduled updates when we last scheduled this task type.
func (s *BalanceScheduler) SetLastScheduled(taskKey string, when time.Time) {
	s.lastScheduled[taskKey] = when
}

// GetMinServerCount returns the minimum server count.
func (s *BalanceScheduler) GetMinServerCount() int {
	return s.minServerCount
}

// SetMinServerCount sets the minimum server count.
func (s *BalanceScheduler) SetMinServerCount(count int) {
	s.minServerCount = count
	glog.V(1).Infof("🔄 Balance minimum server count set to: %d", count)
}

// GetMoveDuringOffHours returns whether to move only during off-hours.
func (s *BalanceScheduler) GetMoveDuringOffHours() bool {
	return s.moveDuringOffHours
}

// SetMoveDuringOffHours sets whether to move only during off-hours.
func (s *BalanceScheduler) SetMoveDuringOffHours(enabled bool) {
	s.moveDuringOffHours = enabled
	glog.V(1).Infof("🔄 Balance move during off-hours: %v", enabled)
}

// GetOffHoursStart returns the off-hours start time (e.g. "23:00").
func (s *BalanceScheduler) GetOffHoursStart() string {
	return s.offHoursStart
}

// SetOffHoursStart sets the off-hours start time.
func (s *BalanceScheduler) SetOffHoursStart(start string) {
	s.offHoursStart = start
	glog.V(1).Infof("🔄 Balance off-hours start time set to: %s", start)
}

// GetOffHoursEnd returns the off-hours end time (e.g. "06:00").
func (s *BalanceScheduler) GetOffHoursEnd() string {
	return s.offHoursEnd
}

// SetOffHoursEnd sets the off-hours end time.
func (s *BalanceScheduler) SetOffHoursEnd(end string) {
	s.offHoursEnd = end
	glog.V(1).Infof("🔄 Balance off-hours end time set to: %s", end)
}

// GetMinInterval returns the minimum interval.
func (s *BalanceScheduler) GetMinInterval() time.Duration {
	return s.minInterval
}

View File

@@ -0,0 +1,361 @@
package balance
import (
"fmt"
"html/template"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// UIProvider provides the UI for balance task configuration
type UIProvider struct {
	detector  *BalanceDetector  // source/target of detection settings; may be nil
	scheduler *BalanceScheduler // source/target of scheduling settings; may be nil
}

// NewUIProvider creates a new balance UI provider bound to the given
// detector and scheduler (either may be nil; defaults are used then).
func NewUIProvider(detector *BalanceDetector, scheduler *BalanceScheduler) *UIProvider {
	return &UIProvider{
		detector:  detector,
		scheduler: scheduler,
	}
}
// GetTaskType returns the task type.
func (ui *UIProvider) GetTaskType() types.TaskType {
	return types.TaskTypeBalance
}

// GetDisplayName returns the human-readable name shown in the admin UI.
func (ui *UIProvider) GetDisplayName() string {
	return "Volume Balance"
}

// GetDescription returns a description of what this task does.
func (ui *UIProvider) GetDescription() string {
	return "Redistributes volumes across volume servers to optimize storage utilization and performance"
}

// GetIcon returns the icon CSS class (Font Awesome) for this task type.
func (ui *UIProvider) GetIcon() string {
	return "fas fa-balance-scale text-secondary"
}
// BalanceConfig represents the balance configuration as edited in the
// admin UI. All durations are persisted as whole seconds.
type BalanceConfig struct {
	Enabled             bool    `json:"enabled"`
	ImbalanceThreshold  float64 `json:"imbalance_threshold"` // fraction, 0.0-1.0
	ScanIntervalSeconds int     `json:"scan_interval_seconds"`
	MaxConcurrent       int     `json:"max_concurrent"`
	MinServerCount      int     `json:"min_server_count"`
	MoveDuringOffHours  bool    `json:"move_during_off_hours"`
	OffHoursStart       string  `json:"off_hours_start"` // e.g. "23:00"
	OffHoursEnd         string  `json:"off_hours_end"`   // e.g. "06:00"
	MinIntervalSeconds  int     `json:"min_interval_seconds"`
}
// secondsToDuration converts a whole number of seconds into a time.Duration.
func secondsToDuration(seconds int) time.Duration {
	return time.Second * time.Duration(seconds)
}

// durationToSeconds converts a time.Duration into whole seconds,
// truncating any fractional second.
func durationToSeconds(d time.Duration) int {
	return int(d / time.Second)
}
// formatDurationForUser renders a second count in a compact human form:
// raw seconds under a minute, whole minutes under an hour, fractional
// hours under a day, and fractional days beyond that.
func formatDurationForUser(seconds int) string {
	d := time.Duration(seconds) * time.Second
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%ds", seconds)
	case d < time.Hour:
		return fmt.Sprintf("%.0fm", d.Minutes())
	case d < 24*time.Hour:
		return fmt.Sprintf("%.1fh", d.Hours())
	default:
		return fmt.Sprintf("%.1fd", d.Hours()/24)
	}
}
// RenderConfigForm renders the configuration form HTML.
//
// NOTE(review): the currentConfig argument is ignored — the form is always
// populated from the live detector/scheduler via getCurrentBalanceConfig.
// Confirm callers do not expect currentConfig to take precedence.
func (ui *UIProvider) RenderConfigForm(currentConfig interface{}) (template.HTML, error) {
	config := ui.getCurrentBalanceConfig()

	// Build form using the FormBuilder helper
	form := types.NewFormBuilder()

	// Detection Settings
	form.AddCheckboxField(
		"enabled",
		"Enable Balance Tasks",
		"Whether balance tasks should be automatically created",
		config.Enabled,
	)
	form.AddNumberField(
		"imbalance_threshold",
		"Imbalance Threshold (%)",
		"Trigger balance when storage imbalance exceeds this percentage (0.0-1.0)",
		config.ImbalanceThreshold,
		true,
	)
	form.AddDurationField("scan_interval", "Scan Interval", "How often to scan for imbalanced volumes", secondsToDuration(config.ScanIntervalSeconds), true)

	// Scheduling Settings
	form.AddNumberField(
		"max_concurrent",
		"Max Concurrent Tasks",
		"Maximum number of balance tasks that can run simultaneously",
		float64(config.MaxConcurrent),
		true,
	)
	form.AddNumberField(
		"min_server_count",
		"Minimum Server Count",
		"Only balance when at least this many servers are available",
		float64(config.MinServerCount),
		true,
	)

	// Timing Settings
	form.AddCheckboxField(
		"move_during_off_hours",
		"Restrict to Off-Hours",
		"Only perform balance operations during off-peak hours",
		config.MoveDuringOffHours,
	)
	form.AddTextField(
		"off_hours_start",
		"Off-Hours Start Time",
		"Start time for off-hours window (e.g., 23:00)",
		config.OffHoursStart,
		false,
	)
	form.AddTextField(
		"off_hours_end",
		"Off-Hours End Time",
		"End time for off-hours window (e.g., 06:00)",
		config.OffHoursEnd,
		false,
	)

	// Timing constraints
	form.AddDurationField("min_interval", "Min Interval", "Minimum time between balance operations", secondsToDuration(config.MinIntervalSeconds), true)

	// Generate organized form sections using Bootstrap components
	html := `
<div class="row">
	<div class="col-12">
		<div class="card mb-4">
			<div class="card-header">
				<h5 class="mb-0">
					<i class="fas fa-balance-scale me-2"></i>
					Balance Configuration
				</h5>
			</div>
			<div class="card-body">
` + string(form.Build()) + `
			</div>
		</div>
	</div>
</div>

<div class="row">
	<div class="col-12">
		<div class="card mb-3">
			<div class="card-header">
				<h5 class="mb-0">
					<i class="fas fa-exclamation-triangle me-2"></i>
					Performance Considerations
				</h5>
			</div>
			<div class="card-body">
				<div class="alert alert-warning" role="alert">
					<h6 class="alert-heading">Important Considerations:</h6>
					<p class="mb-2"><strong>Performance:</strong> Volume balancing involves data movement and can impact cluster performance.</p>
					<p class="mb-2"><strong>Recommendation:</strong> Enable off-hours restriction to minimize impact on production workloads.</p>
					<p class="mb-0"><strong>Safety:</strong> Requires at least ` + fmt.Sprintf("%d", config.MinServerCount) + ` servers to ensure data safety during moves.</p>
				</div>
			</div>
		</div>
	</div>
</div>`

	return template.HTML(html), nil
}
// ParseConfigForm parses submitted form data into a *BalanceConfig.
// Missing fields keep their zero value; invalid values yield an error.
func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
	// first returns the first submitted value for key, if any.
	first := func(key string) (string, bool) {
		values := formData[key]
		if len(values) == 0 {
			return "", false
		}
		return values[0], true
	}

	config := &BalanceConfig{}

	// Checkboxes are present in the form data only when checked.
	config.Enabled = len(formData["enabled"]) > 0
	config.MoveDuringOffHours = len(formData["move_during_off_hours"]) > 0

	// Imbalance threshold: a fraction in [0, 1].
	if raw, ok := first("imbalance_threshold"); ok {
		threshold, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid imbalance threshold: %v", err)
		}
		if threshold < 0 || threshold > 1 {
			return nil, fmt.Errorf("imbalance threshold must be between 0.0 and 1.0")
		}
		config.ImbalanceThreshold = threshold
	}

	// Scan interval, submitted in Go duration syntax (e.g. "1h30m").
	if raw, ok := first("scan_interval"); ok {
		duration, err := time.ParseDuration(raw)
		if err != nil {
			return nil, fmt.Errorf("invalid scan interval: %v", err)
		}
		config.ScanIntervalSeconds = int(duration.Seconds())
	}

	if raw, ok := first("max_concurrent"); ok {
		maxConcurrent, err := strconv.Atoi(raw)
		if err != nil {
			return nil, fmt.Errorf("invalid max concurrent: %v", err)
		}
		if maxConcurrent < 1 {
			return nil, fmt.Errorf("max concurrent must be at least 1")
		}
		config.MaxConcurrent = maxConcurrent
	}

	if raw, ok := first("min_server_count"); ok {
		minServerCount, err := strconv.Atoi(raw)
		if err != nil {
			return nil, fmt.Errorf("invalid min server count: %v", err)
		}
		if minServerCount < 2 {
			return nil, fmt.Errorf("min server count must be at least 2")
		}
		config.MinServerCount = minServerCount
	}

	// Off-hours window bounds are free-form text (e.g. "23:00").
	if raw, ok := first("off_hours_start"); ok {
		config.OffHoursStart = raw
	}
	if raw, ok := first("off_hours_end"); ok {
		config.OffHoursEnd = raw
	}

	// Minimum interval, also in Go duration syntax.
	if raw, ok := first("min_interval"); ok {
		duration, err := time.ParseDuration(raw)
		if err != nil {
			return nil, fmt.Errorf("invalid min interval: %v", err)
		}
		config.MinIntervalSeconds = int(duration.Seconds())
	}

	return config, nil
}
// GetCurrentConfig returns the current configuration, read live from the
// detector and scheduler.
func (ui *UIProvider) GetCurrentConfig() interface{} {
	return ui.getCurrentBalanceConfig()
}
// ApplyConfig applies the new configuration to the live detector and
// scheduler. config must be a *BalanceConfig (as produced by
// ParseConfigForm); any other type is rejected.
func (ui *UIProvider) ApplyConfig(config interface{}) error {
	balanceConfig, ok := config.(*BalanceConfig)
	if !ok {
		return fmt.Errorf("invalid config type, expected *BalanceConfig")
	}

	// Apply to detector
	if ui.detector != nil {
		ui.detector.SetEnabled(balanceConfig.Enabled)
		ui.detector.SetThreshold(balanceConfig.ImbalanceThreshold)
		ui.detector.SetMinCheckInterval(secondsToDuration(balanceConfig.ScanIntervalSeconds))
	}

	// Apply to scheduler
	if ui.scheduler != nil {
		ui.scheduler.SetEnabled(balanceConfig.Enabled)
		ui.scheduler.SetMaxConcurrent(balanceConfig.MaxConcurrent)
		// Fix: MinIntervalSeconds was parsed from the form but previously
		// never applied, so the "Min Interval" setting silently had no effect.
		ui.scheduler.SetMinInterval(secondsToDuration(balanceConfig.MinIntervalSeconds))
		ui.scheduler.SetMinServerCount(balanceConfig.MinServerCount)
		ui.scheduler.SetMoveDuringOffHours(balanceConfig.MoveDuringOffHours)
		ui.scheduler.SetOffHoursStart(balanceConfig.OffHoursStart)
		ui.scheduler.SetOffHoursEnd(balanceConfig.OffHoursEnd)
	}

	glog.V(1).Infof("Applied balance configuration: enabled=%v, threshold=%.1f%%, max_concurrent=%d, min_servers=%d, off_hours=%v",
		balanceConfig.Enabled, balanceConfig.ImbalanceThreshold*100, balanceConfig.MaxConcurrent,
		balanceConfig.MinServerCount, balanceConfig.MoveDuringOffHours)
	return nil
}
// getCurrentBalanceConfig gets the current configuration from detector and
// scheduler, falling back to built-in defaults when either is nil.
func (ui *UIProvider) getCurrentBalanceConfig() *BalanceConfig {
	config := &BalanceConfig{
		// Default values (fallback if detectors/schedulers are nil)
		Enabled:             true,
		ImbalanceThreshold:  0.1, // 10% imbalance
		ScanIntervalSeconds: durationToSeconds(4 * time.Hour),
		MaxConcurrent:       1,
		MinServerCount:      3,
		MoveDuringOffHours:  true,
		OffHoursStart:       "23:00",
		OffHoursEnd:         "06:00",
		MinIntervalSeconds:  durationToSeconds(1 * time.Hour),
	}

	// Get current values from detector
	if ui.detector != nil {
		config.Enabled = ui.detector.IsEnabled()
		config.ImbalanceThreshold = ui.detector.GetThreshold()
		config.ScanIntervalSeconds = durationToSeconds(ui.detector.ScanInterval())
	}

	// Get current values from scheduler
	if ui.scheduler != nil {
		config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
		config.MinServerCount = ui.scheduler.GetMinServerCount()
		config.MoveDuringOffHours = ui.scheduler.GetMoveDuringOffHours()
		config.OffHoursStart = ui.scheduler.GetOffHoursStart()
		config.OffHoursEnd = ui.scheduler.GetOffHoursEnd()
		// Fix: the scheduler's actual min interval was previously never read
		// back, so the UI always displayed the default value even after the
		// setting had been changed.
		config.MinIntervalSeconds = durationToSeconds(ui.scheduler.GetMinInterval())
	}

	return config
}
// RegisterUI registers the balance UI provider with the UI registry.
// detector and scheduler should be the shared instances (see
// GetSharedInstances) so UI edits affect the live components.
func RegisterUI(uiRegistry *types.UIRegistry, detector *BalanceDetector, scheduler *BalanceScheduler) {
	uiProvider := NewUIProvider(detector, scheduler)
	uiRegistry.RegisterUI(uiProvider)
	glog.V(1).Infof("✅ Registered balance task UI provider")
}
// DefaultBalanceConfig returns default balance configuration.
// Unlike the detector/scheduler constructors, balance starts disabled here
// and uses a more tolerant 30% imbalance threshold.
func DefaultBalanceConfig() *BalanceConfig {
	return &BalanceConfig{
		Enabled:             false,
		ImbalanceThreshold:  0.3,
		ScanIntervalSeconds: durationToSeconds(4 * time.Hour),
		MaxConcurrent:       1,
		MinServerCount:      3,
		MoveDuringOffHours:  false,
		OffHoursStart:       "22:00",
		OffHoursEnd:         "06:00",
		MinIntervalSeconds:  durationToSeconds(1 * time.Hour),
	}
}

View File

@@ -0,0 +1,369 @@
package balance
import (
"fmt"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// formatDurationFromSeconds renders a second count using Go's standard
// time.Duration string notation (e.g. 90 -> "1m30s").
func formatDurationFromSeconds(seconds int) string {
	return (time.Duration(seconds) * time.Second).String()
}
// secondsToValueAndUnit converts a duration in seconds to the largest
// whole unit (days or hours) that represents it exactly, falling back to
// (possibly fractional) minutes.
func secondsToValueAndUnit(seconds int) (float64, string) {
	if seconds == 0 {
		return 0, "minutes"
	}
	// Try days first
	if seconds%(24*3600) == 0 && seconds >= 24*3600 {
		return float64(seconds / (24 * 3600)), "days"
	}
	// Try hours
	if seconds%3600 == 0 && seconds >= 3600 {
		return float64(seconds / 3600), "hours"
	}
	// Default to minutes. Fix: use floating-point division so values that
	// are not a whole number of minutes survive a round trip through
	// valueAndUnitToSeconds — the previous integer division mapped 30s to
	// 0 minutes and 90s to 1 minute, silently corrupting the setting.
	return float64(seconds) / 60, "minutes"
}
// valueAndUnitToSeconds converts a value expressed in the given unit
// ("days", "hours" or "minutes") back to seconds. Any unrecognized unit
// is treated as minutes.
func valueAndUnitToSeconds(value float64, unit string) int {
	if unit == "days" {
		return int(value * 24 * 3600)
	}
	if unit == "hours" {
		return int(value * 3600)
	}
	// "minutes" and any unknown unit fall through to minutes.
	return int(value * 60)
}
// UITemplProvider provides the templ-based UI for balance task configuration
type UITemplProvider struct {
	detector  *BalanceDetector  // detection settings source; may be nil
	scheduler *BalanceScheduler // scheduling settings source; may be nil
}

// NewUITemplProvider creates a new balance templ UI provider bound to the
// given detector and scheduler.
func NewUITemplProvider(detector *BalanceDetector, scheduler *BalanceScheduler) *UITemplProvider {
	return &UITemplProvider{
		detector:  detector,
		scheduler: scheduler,
	}
}
// GetTaskType returns the task type.
func (ui *UITemplProvider) GetTaskType() types.TaskType {
	return types.TaskTypeBalance
}

// GetDisplayName returns the human-readable name shown in the admin UI.
func (ui *UITemplProvider) GetDisplayName() string {
	return "Volume Balance"
}

// GetDescription returns a description of what this task does.
func (ui *UITemplProvider) GetDescription() string {
	return "Redistributes volumes across volume servers to optimize storage utilization and performance"
}

// GetIcon returns the icon CSS class (Font Awesome) for this task type.
func (ui *UITemplProvider) GetIcon() string {
	return "fas fa-balance-scale text-secondary"
}
// RenderConfigSections renders the configuration as templ section data:
// detection, scheduling, timing-constraint and informational sections.
//
// NOTE(review): like the html/template provider, the currentConfig argument
// is ignored in favor of the live getCurrentBalanceConfig values — confirm
// this is intended. floatPtr is defined elsewhere in this package.
func (ui *UITemplProvider) RenderConfigSections(currentConfig interface{}) ([]components.ConfigSectionData, error) {
	config := ui.getCurrentBalanceConfig()

	// Detection settings section
	detectionSection := components.ConfigSectionData{
		Title:       "Detection Settings",
		Icon:        "fas fa-search",
		Description: "Configure when balance tasks should be triggered",
		Fields: []interface{}{
			components.CheckboxFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "enabled",
					Label:       "Enable Balance Tasks",
					Description: "Whether balance tasks should be automatically created",
				},
				Checked: config.Enabled,
			},
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "imbalance_threshold",
					Label:       "Imbalance Threshold",
					Description: "Trigger balance when storage imbalance exceeds this percentage (0.0-1.0)",
					Required:    true,
				},
				Value: config.ImbalanceThreshold,
				Step:  "0.01",
				Min:   floatPtr(0.0),
				Max:   floatPtr(1.0),
			},
			components.DurationInputFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "scan_interval",
					Label:       "Scan Interval",
					Description: "How often to scan for imbalanced volumes",
					Required:    true,
				},
				Seconds: config.ScanIntervalSeconds,
			},
		},
	}

	// Scheduling settings section
	schedulingSection := components.ConfigSectionData{
		Title:       "Scheduling Settings",
		Icon:        "fas fa-clock",
		Description: "Configure task scheduling and concurrency",
		Fields: []interface{}{
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "max_concurrent",
					Label:       "Max Concurrent Tasks",
					Description: "Maximum number of balance tasks that can run simultaneously",
					Required:    true,
				},
				Value: float64(config.MaxConcurrent),
				Step:  "1",
				Min:   floatPtr(1),
			},
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "min_server_count",
					Label:       "Minimum Server Count",
					Description: "Only balance when at least this many servers are available",
					Required:    true,
				},
				Value: float64(config.MinServerCount),
				Step:  "1",
				Min:   floatPtr(1),
			},
		},
	}

	// Timing constraints section
	timingSection := components.ConfigSectionData{
		Title:       "Timing Constraints",
		Icon:        "fas fa-calendar-clock",
		Description: "Configure when balance operations are allowed",
		Fields: []interface{}{
			components.CheckboxFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "move_during_off_hours",
					Label:       "Restrict to Off-Hours",
					Description: "Only perform balance operations during off-peak hours",
				},
				Checked: config.MoveDuringOffHours,
			},
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "off_hours_start",
					Label:       "Off-Hours Start Time",
					Description: "Start time for off-hours window (e.g., 23:00)",
				},
				Value: config.OffHoursStart,
			},
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "off_hours_end",
					Label:       "Off-Hours End Time",
					Description: "End time for off-hours window (e.g., 06:00)",
				},
				Value: config.OffHoursEnd,
			},
		},
	}

	// Performance impact info section (read-only text fields used as
	// informational content rather than editable settings)
	performanceSection := components.ConfigSectionData{
		Title:       "Performance Considerations",
		Icon:        "fas fa-exclamation-triangle",
		Description: "Important information about balance operations",
		Fields: []interface{}{
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "performance_info",
					Label:       "Performance Impact",
					Description: "Volume balancing involves data movement and can impact cluster performance",
				},
				Value: "Enable off-hours restriction to minimize impact on production workloads",
			},
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "safety_info",
					Label:       "Safety Requirements",
					Description: fmt.Sprintf("Requires at least %d servers to ensure data safety during moves", config.MinServerCount),
				},
				Value: "Maintains data safety during volume moves between servers",
			},
		},
	}

	return []components.ConfigSectionData{detectionSection, schedulingSection, timingSection, performanceSection}, nil
}
// ParseConfigForm converts submitted form values into a *BalanceConfig,
// validating each numeric field and returning the first error encountered.
// Duration fields arrive as a value plus a unit selector and are normalized
// to seconds via valueAndUnitToSeconds.
func (ui *UITemplProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
	cfg := &BalanceConfig{}

	// HTML checkboxes submit "on" when checked and are absent otherwise.
	if vals := formData["enabled"]; len(vals) > 0 && vals[0] == "on" {
		cfg.Enabled = true
	}

	// Imbalance threshold must parse as a float within [0.0, 1.0].
	if vals := formData["imbalance_threshold"]; len(vals) > 0 {
		threshold, err := strconv.ParseFloat(vals[0], 64)
		if err != nil {
			return nil, fmt.Errorf("invalid imbalance threshold: %v", err)
		}
		if threshold < 0 || threshold > 1 {
			return nil, fmt.Errorf("imbalance threshold must be between 0.0 and 1.0")
		}
		cfg.ImbalanceThreshold = threshold
	}

	// Scan interval: numeric value plus a unit ("minutes" when omitted).
	if vals := formData["scan_interval"]; len(vals) > 0 {
		value, err := strconv.ParseFloat(vals[0], 64)
		if err != nil {
			return nil, fmt.Errorf("invalid scan interval value: %v", err)
		}
		unit := "minutes" // default
		if units := formData["scan_interval_unit"]; len(units) > 0 {
			unit = units[0]
		}
		cfg.ScanIntervalSeconds = valueAndUnitToSeconds(value, unit)
	}

	// Max concurrent tasks: integer, at least 1.
	if vals := formData["max_concurrent"]; len(vals) > 0 {
		concurrent, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid max concurrent: %v", err)
		}
		if concurrent < 1 {
			return nil, fmt.Errorf("max concurrent must be at least 1")
		}
		cfg.MaxConcurrent = concurrent
	}

	// Minimum server count: integer, at least 1.
	if vals := formData["min_server_count"]; len(vals) > 0 {
		serverCount, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid min server count: %v", err)
		}
		if serverCount < 1 {
			return nil, fmt.Errorf("min server count must be at least 1")
		}
		cfg.MinServerCount = serverCount
	}

	// Off-hours checkbox plus the window boundaries (free-form strings).
	if vals := formData["move_during_off_hours"]; len(vals) > 0 && vals[0] == "on" {
		cfg.MoveDuringOffHours = true
	}
	if vals := formData["off_hours_start"]; len(vals) > 0 {
		cfg.OffHoursStart = vals[0]
	}
	if vals := formData["off_hours_end"]; len(vals) > 0 {
		cfg.OffHoursEnd = vals[0]
	}

	return cfg, nil
}
// GetCurrentConfig returns the current balance configuration, assembled from
// the live detector and scheduler values (see getCurrentBalanceConfig).
// The return type is interface{} to satisfy the generic UI provider contract.
func (ui *UITemplProvider) GetCurrentConfig() interface{} {
	return ui.getCurrentBalanceConfig()
}
// ApplyConfig applies the new configuration to the balance detector and
// scheduler. It expects a *BalanceConfig pointer — which matches what this
// provider's ParseConfigForm returns — and rejects any other type.
func (ui *UITemplProvider) ApplyConfig(config interface{}) error {
	balanceConfig, ok := config.(*BalanceConfig)
	if !ok {
		return fmt.Errorf("invalid config type, expected *BalanceConfig")
	}
	// Apply detection-related settings to the detector, if one is wired up.
	if ui.detector != nil {
		ui.detector.SetEnabled(balanceConfig.Enabled)
		ui.detector.SetThreshold(balanceConfig.ImbalanceThreshold)
		ui.detector.SetMinCheckInterval(time.Duration(balanceConfig.ScanIntervalSeconds) * time.Second)
	}
	// Apply scheduling-related settings to the scheduler, if one is wired up.
	if ui.scheduler != nil {
		ui.scheduler.SetEnabled(balanceConfig.Enabled)
		ui.scheduler.SetMaxConcurrent(balanceConfig.MaxConcurrent)
		ui.scheduler.SetMinServerCount(balanceConfig.MinServerCount)
		ui.scheduler.SetMoveDuringOffHours(balanceConfig.MoveDuringOffHours)
		ui.scheduler.SetOffHoursStart(balanceConfig.OffHoursStart)
		ui.scheduler.SetOffHoursEnd(balanceConfig.OffHoursEnd)
	}
	glog.V(1).Infof("Applied balance configuration: enabled=%v, threshold=%.1f%%, max_concurrent=%d, min_servers=%d, off_hours=%v",
		balanceConfig.Enabled, balanceConfig.ImbalanceThreshold*100, balanceConfig.MaxConcurrent,
		balanceConfig.MinServerCount, balanceConfig.MoveDuringOffHours)
	return nil
}
// getCurrentBalanceConfig builds a BalanceConfig snapshot: hard-coded
// defaults first, then overridden with live values from the detector and
// scheduler whenever those components are present.
func (ui *UITemplProvider) getCurrentBalanceConfig() *BalanceConfig {
	// Defaults used only when the detector/scheduler have not been wired up.
	cfg := &BalanceConfig{
		Enabled:             true,
		ImbalanceThreshold:  0.1, // 10% imbalance
		ScanIntervalSeconds: int((4 * time.Hour).Seconds()),
		MaxConcurrent:       1,
		MinServerCount:      3,
		MoveDuringOffHours:  true,
		OffHoursStart:       "23:00",
		OffHoursEnd:         "06:00",
	}
	// Detection settings come from the live detector.
	if d := ui.detector; d != nil {
		cfg.Enabled = d.IsEnabled()
		cfg.ImbalanceThreshold = d.GetThreshold()
		cfg.ScanIntervalSeconds = int(d.ScanInterval().Seconds())
	}
	// Scheduling settings come from the live scheduler.
	if s := ui.scheduler; s != nil {
		cfg.MaxConcurrent = s.GetMaxConcurrent()
		cfg.MinServerCount = s.GetMinServerCount()
		cfg.MoveDuringOffHours = s.GetMoveDuringOffHours()
		cfg.OffHoursStart = s.GetOffHoursStart()
		cfg.OffHoursEnd = s.GetOffHoursEnd()
	}
	return cfg
}
// floatPtr returns a pointer to a fresh copy of the given float64 value,
// convenient for the optional Min/Max pointer fields on form field structs.
func floatPtr(f float64) *float64 {
	v := f
	return &v
}
// RegisterUITempl registers the balance templ UI provider with the UI
// registry, binding it to the given detector and scheduler instances so that
// configuration changes made through the UI affect the live components.
func RegisterUITempl(uiRegistry *types.UITemplRegistry, detector *BalanceDetector, scheduler *BalanceScheduler) {
	uiProvider := NewUITemplProvider(detector, scheduler)
	uiRegistry.RegisterUI(uiProvider)
	glog.V(1).Infof("✅ Registered balance task templ UI provider")
}

View File

@@ -0,0 +1,79 @@
package erasure_coding
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Task implements erasure coding operation to convert volumes to EC format.
// It embeds BaseTask for shared progress/cancellation handling.
type Task struct {
	*tasks.BaseTask
	server   string // volume server address hosting the volume
	volumeID uint32 // ID of the volume to convert
}
// NewTask builds an erasure coding task targeting the given volume on the
// given server.
func NewTask(server string, volumeID uint32) *Task {
	return &Task{
		BaseTask: tasks.NewBaseTask(types.TaskTypeErasureCoding),
		server:   server,
		volumeID: volumeID,
	}
}
// Execute executes the erasure coding task.
//
// NOTE(review): this is a simulated implementation — it sleeps through a
// fixed set of steps and reports progress, but performs no actual EC
// conversion against the volume server. Confirm whether the real conversion
// is implemented elsewhere before relying on this task.
func (t *Task) Execute(params types.TaskParams) error {
	glog.Infof("Starting erasure coding task for volume %d on server %s", t.volumeID, t.server)
	// Simulate erasure coding operation with progress updates
	steps := []struct {
		name     string
		duration time.Duration
		progress float64
	}{
		{"Analyzing volume", 2 * time.Second, 15},
		{"Creating EC shards", 5 * time.Second, 50},
		{"Verifying shards", 2 * time.Second, 75},
		{"Finalizing EC volume", 1 * time.Second, 100},
	}
	for _, step := range steps {
		// Check for cancellation between steps so a cancel takes effect
		// within one step's duration at most.
		if t.IsCancelled() {
			return fmt.Errorf("erasure coding task cancelled")
		}
		glog.V(1).Infof("Erasure coding task step: %s", step.name)
		t.SetProgress(step.progress)
		// Simulate work
		time.Sleep(step.duration)
	}
	glog.Infof("Erasure coding task completed for volume %d on server %s", t.volumeID, t.server)
	return nil
}
// Validate checks that the task parameters identify both a volume and a
// server; either missing makes the task unrunnable.
func (t *Task) Validate(params types.TaskParams) error {
	switch {
	case params.VolumeID == 0:
		return fmt.Errorf("volume_id is required")
	case params.Server == "":
		return fmt.Errorf("server is required")
	default:
		return nil
	}
}
// EstimateTime estimates the time needed for the task. Currently returns a
// fixed 30-second estimate regardless of params.
func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
	// Base time for erasure coding operation
	baseTime := 30 * time.Second
	// Could adjust based on volume size or other factors
	return baseTime
}

View File

@@ -0,0 +1,139 @@
package erasure_coding
import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// EcDetector implements erasure coding task detection. It scans volume
// health metrics and proposes EC conversion for old, full, read-only
// volumes.
type EcDetector struct {
	enabled        bool          // whether detection runs at all
	volumeAgeHours int           // minimum volume age, in hours
	fullnessRatio  float64       // minimum fullness ratio (0.0-1.0)
	scanInterval   time.Duration // how often ScanForTasks should run
}

// Compile-time interface assertions
var (
	_ types.TaskDetector = (*EcDetector)(nil)
)
// NewEcDetector constructs a detector with conservative defaults: detection
// disabled, a one-week volume age threshold, a 90% fullness requirement, and
// a two-hour scan interval.
func NewEcDetector() *EcDetector {
	d := &EcDetector{}
	d.enabled = false          // Conservative default
	d.volumeAgeHours = 24 * 7  // 1 week
	d.fullnessRatio = 0.9      // 90% full
	d.scanInterval = 2 * time.Hour
	return d
}
// GetTaskType returns the task type this detector produces
// (types.TaskTypeErasureCoding).
func (d *EcDetector) GetTaskType() types.TaskType {
	return types.TaskTypeErasureCoding
}
// ScanForTasks scans for volumes that should be converted to erasure coding.
// A volume qualifies when it is not already EC, is at least volumeAgeHours
// old, is at least fullnessRatio full, and is read-only (so the conversion
// cannot race with writes). Returns nil when the detector is disabled.
func (d *EcDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
	if !d.enabled {
		return nil, nil
	}
	var results []*types.TaskDetectionResult
	now := time.Now()
	ageThreshold := time.Duration(d.volumeAgeHours) * time.Hour
	for _, metric := range volumeMetrics {
		// Skip if already EC volume
		if metric.IsECVolume {
			continue
		}
		// Check age and fullness criteria
		if metric.Age >= ageThreshold && metric.FullnessRatio >= d.fullnessRatio {
			// Check if volume is read-only (safe for EC conversion)
			if !metric.IsReadOnly {
				continue
			}
			result := &types.TaskDetectionResult{
				TaskType:   types.TaskTypeErasureCoding,
				VolumeID:   metric.VolumeID,
				Server:     metric.Server,
				Collection: metric.Collection,
				Priority:   types.TaskPriorityLow, // EC is not urgent
				Reason:     "Volume is old and full enough for EC conversion",
				Parameters: map[string]interface{}{
					"age_hours":      int(metric.Age.Hours()),
					"fullness_ratio": metric.FullnessRatio,
				},
				ScheduleAt: now,
			}
			results = append(results, result)
		}
	}
	glog.V(2).Infof("EC detector found %d tasks to schedule", len(results))
	return results, nil
}
// ScanInterval returns how often this task type should be scanned.
func (d *EcDetector) ScanInterval() time.Duration {
	return d.scanInterval
}

// IsEnabled returns whether this task type is enabled.
func (d *EcDetector) IsEnabled() bool {
	return d.enabled
}

// Configuration setters

// SetEnabled toggles whether ScanForTasks produces any results.
func (d *EcDetector) SetEnabled(enabled bool) {
	d.enabled = enabled
}

// SetVolumeAgeHours sets the minimum volume age threshold, in hours.
func (d *EcDetector) SetVolumeAgeHours(hours int) {
	d.volumeAgeHours = hours
}

// SetFullnessRatio sets the minimum fullness ratio threshold (0.0-1.0).
func (d *EcDetector) SetFullnessRatio(ratio float64) {
	d.fullnessRatio = ratio
}

// SetScanInterval sets how often this detector should be scanned.
func (d *EcDetector) SetScanInterval(interval time.Duration) {
	d.scanInterval = interval
}

// GetVolumeAgeHours returns the current volume age threshold in hours.
func (d *EcDetector) GetVolumeAgeHours() int {
	return d.volumeAgeHours
}

// GetFullnessRatio returns the current fullness ratio threshold.
func (d *EcDetector) GetFullnessRatio() float64 {
	return d.fullnessRatio
}

// GetScanInterval returns the scan interval (same value as ScanInterval).
func (d *EcDetector) GetScanInterval() time.Duration {
	return d.scanInterval
}
// ConfigureFromPolicy configures the detector based on the maintenance
// policy. The policy is accepted as interface{} to avoid an import cycle
// with the admin package; it is duck-typed against the three getters the
// detector needs. Unsupported policy types are logged and ignored.
func (d *EcDetector) ConfigureFromPolicy(policy interface{}) {
	// Type assert to the maintenance policy type we expect
	if maintenancePolicy, ok := policy.(interface {
		GetECEnabled() bool
		GetECVolumeAgeHours() int
		GetECFullnessRatio() float64
	}); ok {
		d.SetEnabled(maintenancePolicy.GetECEnabled())
		d.SetVolumeAgeHours(maintenancePolicy.GetECVolumeAgeHours())
		d.SetFullnessRatio(maintenancePolicy.GetECFullnessRatio())
	} else {
		glog.V(1).Infof("Could not configure EC detector from policy: unsupported policy type")
	}
}

View File

@@ -0,0 +1,81 @@
package erasure_coding
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Factory creates erasure coding task instances. It embeds BaseTaskFactory
// for the shared type/capabilities/description metadata.
type Factory struct {
	*tasks.BaseTaskFactory
}
// NewFactory creates a new erasure coding task factory, advertising the
// capabilities workers must have to run its tasks.
func NewFactory() *Factory {
	return &Factory{
		BaseTaskFactory: tasks.NewBaseTaskFactory(
			types.TaskTypeErasureCoding,
			[]string{"erasure_coding", "storage", "durability"},
			"Convert volumes to erasure coded format for improved durability",
		),
	}
}
// Create validates the parameters, builds a new erasure coding task, and
// pre-populates its estimated duration.
func (f *Factory) Create(params types.TaskParams) (types.TaskInterface, error) {
	// Reject parameters that do not identify a volume and a server.
	switch {
	case params.VolumeID == 0:
		return nil, fmt.Errorf("volume_id is required")
	case params.Server == "":
		return nil, fmt.Errorf("server is required")
	}
	t := NewTask(params.Server, params.VolumeID)
	t.SetEstimatedDuration(t.EstimateTime(params))
	return t, nil
}
// Shared detector and scheduler instances.
//
// These package-level singletons are shared between the task registry and
// the UI registry (see init below) so that configuration changes made
// through the UI affect the same instances used for detection/scheduling.
var (
	sharedDetector  *EcDetector
	sharedScheduler *Scheduler
)

// getSharedInstances returns the shared detector and scheduler instances,
// lazily creating them on first use.
//
// NOTE(review): the lazy initialization is not goroutine-safe; it appears to
// be reached only from init() today — confirm before calling concurrently,
// or guard with sync.Once.
func getSharedInstances() (*EcDetector, *Scheduler) {
	if sharedDetector == nil {
		sharedDetector = NewEcDetector()
	}
	if sharedScheduler == nil {
		sharedScheduler = NewScheduler()
	}
	return sharedDetector, sharedScheduler
}

// GetSharedInstances returns the shared detector and scheduler instances (public access).
func GetSharedInstances() (*EcDetector, *Scheduler) {
	return getSharedInstances()
}
// Auto-register this task when the package is imported.
//
// Registration is three-fold: the task factory (so workers can execute EC
// tasks), the detector/scheduler pair (so the maintenance loop can find and
// schedule them), and the UI provider (so the admin UI can configure them).
// The same shared detector/scheduler instances are used everywhere so UI
// edits take effect on the live components.
func init() {
	factory := NewFactory()
	tasks.AutoRegister(types.TaskTypeErasureCoding, factory)
	// Get shared instances for all registrations
	detector, scheduler := getSharedInstances()
	// Register with types registry
	tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
		registry.RegisterTask(detector, scheduler)
	})
	// Register with UI registry using the same instances
	tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
		RegisterUI(uiRegistry, detector, scheduler)
	})
}

View File

@@ -0,0 +1,114 @@
package erasure_coding
import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Scheduler implements erasure coding task scheduling: it gates EC tasks on
// an enabled flag and a maximum concurrency.
type Scheduler struct {
	maxConcurrent int  // maximum simultaneously running EC tasks
	enabled       bool // whether EC tasks may be scheduled at all
}
// NewScheduler creates a new erasure coding scheduler with conservative
// defaults: disabled, and at most one concurrent task.
func NewScheduler() *Scheduler {
	return &Scheduler{
		maxConcurrent: 1,     // Conservative default
		enabled:       false, // Conservative default
	}
}
// GetTaskType returns the task type this scheduler handles
// (types.TaskTypeErasureCoding).
func (s *Scheduler) GetTaskType() types.TaskType {
	return types.TaskTypeErasureCoding
}
// CanScheduleNow determines if an erasure coding task can be scheduled now.
// It requires the scheduler to be enabled, at least one available worker,
// the running-EC-task count to be below maxConcurrent, and at least one
// available worker that advertises the erasure coding capability.
func (s *Scheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
	// Disabled scheduler or no workers at all: nothing can run.
	if !s.enabled || len(availableWorkers) == 0 {
		return false
	}

	// Enforce the concurrency limit against currently running EC tasks.
	running := 0
	for _, rt := range runningTasks {
		if rt.Type == types.TaskTypeErasureCoding {
			running++
		}
	}
	if running >= s.maxConcurrent {
		glog.V(3).Infof("EC scheduler: at concurrency limit (%d/%d)", running, s.maxConcurrent)
		return false
	}

	// Accept as soon as one available worker can handle EC tasks.
	for _, w := range availableWorkers {
		for _, capability := range w.Capabilities {
			if capability == types.TaskTypeErasureCoding {
				glog.V(3).Infof("EC scheduler: can schedule task for volume %d", task.VolumeID)
				return true
			}
		}
	}
	return false
}
// GetMaxConcurrent returns the maximum number of concurrent tasks.
func (s *Scheduler) GetMaxConcurrent() int {
	return s.maxConcurrent
}

// GetDefaultRepeatInterval returns the default interval to wait before
// repeating EC tasks on the same volume.
func (s *Scheduler) GetDefaultRepeatInterval() time.Duration {
	return 24 * time.Hour // Don't repeat EC for 24 hours
}

// GetPriority returns the priority for this task; EC conversion is always
// low priority regardless of the task contents.
func (s *Scheduler) GetPriority(task *types.Task) types.TaskPriority {
	return types.TaskPriorityLow // EC is not urgent
}
// WasTaskRecentlyCompleted checks if a similar task (same type, volume, and
// server) was completed within the scheduler's repeat interval, counting
// back from now.
func (s *Scheduler) WasTaskRecentlyCompleted(task *types.Task, completedTasks []*types.Task, now time.Time) bool {
	// Use the shared repeat interval rather than a duplicated 24h constant,
	// so this check stays consistent if GetDefaultRepeatInterval changes.
	cutoff := now.Add(-s.GetDefaultRepeatInterval())
	for _, completedTask := range completedTasks {
		if completedTask.Type == types.TaskTypeErasureCoding &&
			completedTask.VolumeID == task.VolumeID &&
			completedTask.Server == task.Server &&
			completedTask.Status == types.TaskStatusCompleted &&
			completedTask.CompletedAt != nil &&
			completedTask.CompletedAt.After(cutoff) {
			return true
		}
	}
	return false
}
// IsEnabled returns whether this task type is enabled.
func (s *Scheduler) IsEnabled() bool {
	return s.enabled
}

// Configuration setters

// SetEnabled toggles whether EC tasks may be scheduled.
func (s *Scheduler) SetEnabled(enabled bool) {
	s.enabled = enabled
}

// SetMaxConcurrent sets the maximum number of simultaneously running EC tasks.
func (s *Scheduler) SetMaxConcurrent(max int) {
	s.maxConcurrent = max
}

View File

@@ -0,0 +1,309 @@
package erasure_coding
import (
"fmt"
"html/template"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// UIProvider provides the (html/template based) UI for erasure coding task
// configuration. It holds the live detector and scheduler so changes made
// through the form take effect immediately.
type UIProvider struct {
	detector  *EcDetector
	scheduler *Scheduler
}
// NewUIProvider creates a new erasure coding UI provider bound to the given
// detector and scheduler instances.
func NewUIProvider(detector *EcDetector, scheduler *Scheduler) *UIProvider {
	return &UIProvider{
		detector:  detector,
		scheduler: scheduler,
	}
}
// GetTaskType returns the task type this provider configures.
func (ui *UIProvider) GetTaskType() types.TaskType {
	return types.TaskTypeErasureCoding
}

// GetDisplayName returns the human-readable name shown in the admin UI.
func (ui *UIProvider) GetDisplayName() string {
	return "Erasure Coding"
}

// GetDescription returns a description of what this task does.
func (ui *UIProvider) GetDescription() string {
	return "Converts volumes to erasure coded format for improved data durability and fault tolerance"
}

// GetIcon returns the icon CSS class (Font Awesome) for this task type.
func (ui *UIProvider) GetIcon() string {
	return "fas fa-shield-alt text-info"
}
// ErasureCodingConfig represents the erasure coding configuration.
//
// NOTE(review): the unit of VolumeAgeHoursSeconds is ambiguous — the default
// in getCurrentECConfig is expressed in seconds (24*3600), but the detector's
// SetVolumeAgeHours/GetVolumeAgeHours work in hours. Confirm the intended
// unit and rename the field accordingly.
type ErasureCodingConfig struct {
	Enabled               bool    `json:"enabled"`                  // whether EC tasks are created automatically
	VolumeAgeHoursSeconds int     `json:"volume_age_hours_seconds"` // minimum volume age before EC conversion (see NOTE above)
	FullnessRatio         float64 `json:"fullness_ratio"`           // minimum fullness ratio (0.0-1.0) before EC conversion
	ScanIntervalSeconds   int     `json:"scan_interval_seconds"`    // how often to scan, in seconds
	MaxConcurrent         int     `json:"max_concurrent"`           // maximum simultaneous EC tasks
	ShardCount            int     `json:"shard_count"`              // number of data shards
	ParityCount           int     `json:"parity_count"`             // number of parity shards
	CollectionFilter      string  `json:"collection_filter"`        // presumably restricts EC to a collection — not referenced in this file's visible code
}
// Helper functions for converting between plain second counts and
// time.Duration values.

// secondsToDuration converts a second count to a time.Duration.
func secondsToDuration(seconds int) time.Duration {
	return time.Second * time.Duration(seconds)
}

// durationToSeconds converts a time.Duration to whole seconds, truncating
// any sub-second remainder.
func durationToSeconds(d time.Duration) int {
	return int(d / time.Second)
}

// formatDurationForUser formats a second count as a short human-readable
// duration: "Ns" under a minute, whole minutes under an hour, tenths of an
// hour under a day, and tenths of a day otherwise.
func formatDurationForUser(seconds int) string {
	d := secondsToDuration(seconds)
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%ds", seconds)
	case d < time.Hour:
		return fmt.Sprintf("%.0fm", d.Minutes())
	case d < 24*time.Hour:
		return fmt.Sprintf("%.1fh", d.Hours())
	default:
		return fmt.Sprintf("%.1fd", d.Hours()/24)
	}
}
// RenderConfigForm renders the configuration form HTML for the erasure
// coding task, built from the current detector/scheduler values via the
// FormBuilder helper and wrapped in Bootstrap card markup.
//
// NOTE(review): the currentConfig argument is ignored; the form always shows
// ui.getCurrentECConfig(). The two duration fields render raw second counts
// in plain number inputs — confirm that is the intended UX.
func (ui *UIProvider) RenderConfigForm(currentConfig interface{}) (template.HTML, error) {
	config := ui.getCurrentECConfig()
	// Build form using the FormBuilder helper
	form := types.NewFormBuilder()
	// Detection Settings
	form.AddCheckboxField(
		"enabled",
		"Enable Erasure Coding Tasks",
		"Whether erasure coding tasks should be automatically created",
		config.Enabled,
	)
	form.AddNumberField(
		"volume_age_hours_seconds",
		"Volume Age Threshold",
		"Only apply erasure coding to volumes older than this duration",
		float64(config.VolumeAgeHoursSeconds),
		true,
	)
	form.AddNumberField(
		"scan_interval_seconds",
		"Scan Interval",
		"How often to scan for volumes needing erasure coding",
		float64(config.ScanIntervalSeconds),
		true,
	)
	// Scheduling Settings
	form.AddNumberField(
		"max_concurrent",
		"Max Concurrent Tasks",
		"Maximum number of erasure coding tasks that can run simultaneously",
		float64(config.MaxConcurrent),
		true,
	)
	// Erasure Coding Parameters
	form.AddNumberField(
		"shard_count",
		"Data Shards",
		"Number of data shards for erasure coding (recommended: 10)",
		float64(config.ShardCount),
		true,
	)
	form.AddNumberField(
		"parity_count",
		"Parity Shards",
		"Number of parity shards for erasure coding (recommended: 4)",
		float64(config.ParityCount),
		true,
	)
	// Generate organized form sections using Bootstrap components
	html := `
<div class="row">
<div class="col-12">
<div class="card mb-4">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-shield-alt me-2"></i>
Erasure Coding Configuration
</h5>
</div>
<div class="card-body">
` + string(form.Build()) + `
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-12">
<div class="card mb-3">
<div class="card-header">
<h5 class="mb-0">
<i class="fas fa-info-circle me-2"></i>
Performance Impact
</h5>
</div>
<div class="card-body">
<div class="alert alert-info" role="alert">
<h6 class="alert-heading">Important Notes:</h6>
<p class="mb-2"><strong>Performance:</strong> Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.</p>
<p class="mb-0"><strong>Durability:</strong> With ` + fmt.Sprintf("%d+%d", config.ShardCount, config.ParityCount) + ` configuration, can tolerate up to ` + fmt.Sprintf("%d", config.ParityCount) + ` shard failures.</p>
</div>
</div>
</div>
</div>
</div>`
	return template.HTML(html), nil
}
// ParseConfigForm decodes submitted form values into an *ErasureCodingConfig,
// validating numeric fields and returning the first error found. Fields
// absent from the form keep their zero values.
func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
	cfg := &ErasureCodingConfig{}

	// Checkbox counts as enabled when the field is present at all.
	cfg.Enabled = len(formData["enabled"]) > 0

	// Volume age threshold (raw integer from the number input).
	if vals := formData["volume_age_hours_seconds"]; len(vals) > 0 {
		hours, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid volume age hours: %v", err)
		}
		cfg.VolumeAgeHoursSeconds = hours
	}

	// Scan interval in seconds.
	if vals := formData["scan_interval_seconds"]; len(vals) > 0 {
		interval, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid scan interval: %v", err)
		}
		cfg.ScanIntervalSeconds = interval
	}

	// Max concurrent tasks: integer, at least 1.
	if vals := formData["max_concurrent"]; len(vals) > 0 {
		maxConcurrent, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid max concurrent: %v", err)
		}
		if maxConcurrent < 1 {
			return nil, fmt.Errorf("max concurrent must be at least 1")
		}
		cfg.MaxConcurrent = maxConcurrent
	}

	// Data shard count: integer, at least 1.
	if vals := formData["shard_count"]; len(vals) > 0 {
		shardCount, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid shard count: %v", err)
		}
		if shardCount < 1 {
			return nil, fmt.Errorf("shard count must be at least 1")
		}
		cfg.ShardCount = shardCount
	}

	// Parity shard count: integer, at least 1.
	if vals := formData["parity_count"]; len(vals) > 0 {
		parityCount, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid parity count: %v", err)
		}
		if parityCount < 1 {
			return nil, fmt.Errorf("parity count must be at least 1")
		}
		cfg.ParityCount = parityCount
	}

	return cfg, nil
}
// GetCurrentConfig returns the current erasure coding configuration,
// assembled from the live detector and scheduler (see getCurrentECConfig).
// Note it returns an ErasureCodingConfig value, not a pointer.
func (ui *UIProvider) GetCurrentConfig() interface{} {
	return ui.getCurrentECConfig()
}
// ApplyConfig applies the new configuration to the detector and scheduler.
//
// It accepts either an ErasureCodingConfig value or a *ErasureCodingConfig
// pointer: ParseConfigForm in this same provider returns a pointer, so the
// previous value-only type assertion made every form save fail with
// "invalid config type".
func (ui *UIProvider) ApplyConfig(config interface{}) error {
	var ecConfig ErasureCodingConfig
	switch c := config.(type) {
	case ErasureCodingConfig:
		ecConfig = c
	case *ErasureCodingConfig:
		if c == nil {
			return fmt.Errorf("invalid config type, expected ErasureCodingConfig")
		}
		ecConfig = *c
	default:
		return fmt.Errorf("invalid config type, expected ErasureCodingConfig")
	}
	// Apply to detector
	if ui.detector != nil {
		ui.detector.SetEnabled(ecConfig.Enabled)
		// NOTE(review): SetVolumeAgeHours expects hours, while the field name
		// says seconds; in this provider the form round-trips the detector's
		// raw hour value, so it is passed through unchanged — confirm the
		// intended unit.
		ui.detector.SetVolumeAgeHours(ecConfig.VolumeAgeHoursSeconds)
		ui.detector.SetScanInterval(secondsToDuration(ecConfig.ScanIntervalSeconds))
	}
	// Apply to scheduler
	if ui.scheduler != nil {
		ui.scheduler.SetEnabled(ecConfig.Enabled)
		ui.scheduler.SetMaxConcurrent(ecConfig.MaxConcurrent)
	}
	glog.V(1).Infof("Applied erasure coding configuration: enabled=%v, age_threshold=%v, max_concurrent=%d, shards=%d+%d",
		ecConfig.Enabled, ecConfig.VolumeAgeHoursSeconds, ecConfig.MaxConcurrent, ecConfig.ShardCount, ecConfig.ParityCount)
	return nil
}
// getCurrentECConfig gets the current configuration from detector and
// scheduler, falling back to hard-coded defaults when either is nil.
//
// NOTE(review): units are inconsistent here — the default for
// VolumeAgeHoursSeconds is written in seconds (24*3600), but the live value
// copied from GetVolumeAgeHours() is in hours. Confirm the intended unit;
// the defaults and the detector path currently disagree.
func (ui *UIProvider) getCurrentECConfig() ErasureCodingConfig {
	config := ErasureCodingConfig{
		// Default values (fallback if detectors/schedulers are nil)
		Enabled:               true,
		VolumeAgeHoursSeconds: 24 * 3600, // 24 hours in seconds
		ScanIntervalSeconds:   2 * 3600,  // 2 hours in seconds
		MaxConcurrent:         1,
		ShardCount:            10,
		ParityCount:           4,
	}
	// Get current values from detector
	if ui.detector != nil {
		config.Enabled = ui.detector.IsEnabled()
		config.VolumeAgeHoursSeconds = ui.detector.GetVolumeAgeHours()
		config.ScanIntervalSeconds = durationToSeconds(ui.detector.ScanInterval())
	}
	// Get current values from scheduler
	if ui.scheduler != nil {
		config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
	}
	return config
}
// RegisterUI registers the erasure coding UI provider with the UI registry,
// bound to the given detector and scheduler so UI edits affect the live
// instances.
func RegisterUI(uiRegistry *types.UIRegistry, detector *EcDetector, scheduler *Scheduler) {
	uiProvider := NewUIProvider(detector, scheduler)
	uiRegistry.RegisterUI(uiProvider)
	glog.V(1).Infof("✅ Registered erasure coding task UI provider")
}

View File

@@ -0,0 +1,319 @@
package erasure_coding
import (
"fmt"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// formatDurationFromSeconds renders a second count using Go's standard
// time.Duration string form (e.g. 90 -> "1m30s").
func formatDurationFromSeconds(seconds int) string {
	return (time.Duration(seconds) * time.Second).String()
}
// valueAndUnitToSeconds converts a numeric value plus a unit name ("days",
// "hours", "minutes", "seconds") into a whole number of seconds. Unknown
// units fall back to minutes, matching the form's default unit.
func valueAndUnitToSeconds(value float64, unit string) int {
	switch unit {
	case "days":
		return int(value * 24 * 60 * 60)
	case "hours":
		return int(value * 60 * 60)
	case "minutes":
		return int(value * 60)
	case "seconds":
		// Previously missing: a "seconds" unit fell through to the minutes
		// default and was incorrectly multiplied by 60.
		return int(value)
	default:
		return int(value * 60) // Default to minutes
	}
}
// UITemplProvider provides the templ-based UI for erasure coding task
// configuration. It holds the live detector and scheduler so form changes
// take effect immediately.
type UITemplProvider struct {
	detector  *EcDetector
	scheduler *Scheduler
}
// NewUITemplProvider creates a new erasure coding templ UI provider bound to
// the given detector and scheduler instances.
func NewUITemplProvider(detector *EcDetector, scheduler *Scheduler) *UITemplProvider {
	return &UITemplProvider{
		detector:  detector,
		scheduler: scheduler,
	}
}
// ErasureCodingConfig is defined in ui.go - we reuse it

// GetTaskType returns the task type this provider configures.
func (ui *UITemplProvider) GetTaskType() types.TaskType {
	return types.TaskTypeErasureCoding
}

// GetDisplayName returns the human-readable name shown in the admin UI.
func (ui *UITemplProvider) GetDisplayName() string {
	return "Erasure Coding"
}

// GetDescription returns a description of what this task does.
func (ui *UITemplProvider) GetDescription() string {
	return "Converts replicated volumes to erasure-coded format for efficient storage"
}

// GetIcon returns the icon CSS class (Font Awesome) for this task type.
func (ui *UITemplProvider) GetIcon() string {
	return "fas fa-shield-alt text-info"
}
// RenderConfigSections renders the erasure coding configuration as templ
// section data: a detection section, an EC parameter section, and a
// read-only informational section. Values are pre-filled from the live
// detector/scheduler via getCurrentECConfig.
//
// NOTE(review): the currentConfig argument is ignored in favor of
// getCurrentECConfig(); confirm that is intended.
func (ui *UITemplProvider) RenderConfigSections(currentConfig interface{}) ([]components.ConfigSectionData, error) {
	config := ui.getCurrentECConfig()
	// Detection settings section
	detectionSection := components.ConfigSectionData{
		Title:       "Detection Settings",
		Icon:        "fas fa-search",
		Description: "Configure when erasure coding tasks should be triggered",
		Fields: []interface{}{
			components.CheckboxFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "enabled",
					Label:       "Enable Erasure Coding Tasks",
					Description: "Whether erasure coding tasks should be automatically created",
				},
				Checked: config.Enabled,
			},
			components.DurationInputFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "scan_interval",
					Label:       "Scan Interval",
					Description: "How often to scan for volumes needing erasure coding",
					Required:    true,
				},
				Seconds: config.ScanIntervalSeconds,
			},
			components.DurationInputFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "volume_age_threshold",
					Label:       "Volume Age Threshold",
					Description: "Only apply erasure coding to volumes older than this age",
					Required:    true,
				},
				// NOTE(review): this field is filled from GetVolumeAgeHours()
				// (hours) elsewhere but rendered as seconds — confirm units.
				Seconds: config.VolumeAgeHoursSeconds,
			},
		},
	}
	// Erasure coding parameters section
	paramsSection := components.ConfigSectionData{
		Title:       "Erasure Coding Parameters",
		Icon:        "fas fa-cogs",
		Description: "Configure erasure coding scheme and performance",
		Fields: []interface{}{
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "data_shards",
					Label:       "Data Shards",
					Description: "Number of data shards in the erasure coding scheme",
					Required:    true,
				},
				Value: float64(config.ShardCount),
				Step:  "1",
				Min:   floatPtr(1),
				Max:   floatPtr(16),
			},
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "parity_shards",
					Label:       "Parity Shards",
					Description: "Number of parity shards (determines fault tolerance)",
					Required:    true,
				},
				Value: float64(config.ParityCount),
				Step:  "1",
				Min:   floatPtr(1),
				Max:   floatPtr(16),
			},
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "max_concurrent",
					Label:       "Max Concurrent Tasks",
					Description: "Maximum number of erasure coding tasks that can run simultaneously",
					Required:    true,
				},
				Value: float64(config.MaxConcurrent),
				Step:  "1",
				Min:   floatPtr(1),
			},
		},
	}
	// Performance impact info section
	infoSection := components.ConfigSectionData{
		Title:       "Performance Impact",
		Icon:        "fas fa-info-circle",
		Description: "Important information about erasure coding operations",
		Fields: []interface{}{
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:  "durability_info",
					Label: "Durability",
					Description: fmt.Sprintf("With %d+%d configuration, can tolerate up to %d shard failures",
						config.ShardCount, config.ParityCount, config.ParityCount),
				},
				Value: "High durability with space efficiency",
			},
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "performance_info",
					Label:       "Performance Note",
					Description: "Erasure coding is CPU and I/O intensive. Consider running during off-peak hours",
				},
				Value: "Schedule during low-traffic periods",
			},
		},
	}
	return []components.ConfigSectionData{detectionSection, paramsSection, infoSection}, nil
}
// ParseConfigForm converts submitted form values into an
// *ErasureCodingConfig, validating each field and returning the first error
// encountered. Duration fields arrive as a value plus a unit selector and
// are normalized to seconds via valueAndUnitToSeconds.
func (ui *UITemplProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
	cfg := &ErasureCodingConfig{}

	// HTML checkboxes submit "on" when checked and are absent otherwise.
	if vals := formData["enabled"]; len(vals) > 0 && vals[0] == "on" {
		cfg.Enabled = true
	}

	// Volume age threshold: numeric value plus a unit ("hours" when omitted).
	if vals := formData["volume_age_threshold"]; len(vals) > 0 {
		value, err := strconv.ParseFloat(vals[0], 64)
		if err != nil {
			return nil, fmt.Errorf("invalid volume age threshold value: %v", err)
		}
		unit := "hours" // default
		if units := formData["volume_age_threshold_unit"]; len(units) > 0 {
			unit = units[0]
		}
		cfg.VolumeAgeHoursSeconds = valueAndUnitToSeconds(value, unit)
	}

	// Scan interval: numeric value plus a unit ("hours" when omitted).
	if vals := formData["scan_interval"]; len(vals) > 0 {
		value, err := strconv.ParseFloat(vals[0], 64)
		if err != nil {
			return nil, fmt.Errorf("invalid scan interval value: %v", err)
		}
		unit := "hours" // default
		if units := formData["scan_interval_unit"]; len(units) > 0 {
			unit = units[0]
		}
		cfg.ScanIntervalSeconds = valueAndUnitToSeconds(value, unit)
	}

	// Data shards: integer in [1, 16], matching the rendered field limits.
	if vals := formData["data_shards"]; len(vals) > 0 {
		shards, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid data shards: %v", err)
		}
		if shards < 1 || shards > 16 {
			return nil, fmt.Errorf("data shards must be between 1 and 16")
		}
		cfg.ShardCount = shards
	}

	// Parity shards: integer in [1, 16], matching the rendered field limits.
	if vals := formData["parity_shards"]; len(vals) > 0 {
		shards, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid parity shards: %v", err)
		}
		if shards < 1 || shards > 16 {
			return nil, fmt.Errorf("parity shards must be between 1 and 16")
		}
		cfg.ParityCount = shards
	}

	// Max concurrent tasks: integer, at least 1.
	if vals := formData["max_concurrent"]; len(vals) > 0 {
		concurrent, err := strconv.Atoi(vals[0])
		if err != nil {
			return nil, fmt.Errorf("invalid max concurrent: %v", err)
		}
		if concurrent < 1 {
			return nil, fmt.Errorf("max concurrent must be at least 1")
		}
		cfg.MaxConcurrent = concurrent
	}

	return cfg, nil
}
// GetCurrentConfig returns the current configuration
// as an *ErasureCodingConfig, read live from the detector and scheduler.
func (ui *UITemplProvider) GetCurrentConfig() interface{} {
	return ui.getCurrentECConfig()
}
// ApplyConfig applies the new configuration
// to the live detector and scheduler. Either receiver may be nil, in which
// case its portion of the config is silently skipped.
func (ui *UITemplProvider) ApplyConfig(config interface{}) error {
	ecConfig, ok := config.(*ErasureCodingConfig)
	if !ok {
		return fmt.Errorf("invalid config type, expected *ErasureCodingConfig")
	}
	// Apply to detector
	if ui.detector != nil {
		ui.detector.SetEnabled(ecConfig.Enabled)
		// NOTE(review): SetVolumeAgeHours is handed a value held in seconds
		// (VolumeAgeHoursSeconds) despite the "Hours" in its name — confirm
		// the detector expects seconds here.
		ui.detector.SetVolumeAgeHours(ecConfig.VolumeAgeHoursSeconds)
		ui.detector.SetScanInterval(time.Duration(ecConfig.ScanIntervalSeconds) * time.Second)
	}
	// Apply to scheduler
	if ui.scheduler != nil {
		ui.scheduler.SetMaxConcurrent(ecConfig.MaxConcurrent)
		ui.scheduler.SetEnabled(ecConfig.Enabled)
	}
	glog.V(1).Infof("Applied erasure coding configuration: enabled=%v, age_threshold=%ds, max_concurrent=%d",
		ecConfig.Enabled, ecConfig.VolumeAgeHoursSeconds, ecConfig.MaxConcurrent)
	return nil
}
// getCurrentECConfig gets the current configuration from detector and scheduler.
// Hard-coded fallback defaults are used for any component that is nil.
func (ui *UITemplProvider) getCurrentECConfig() *ErasureCodingConfig {
	config := &ErasureCodingConfig{
		// Default values (fallback if detectors/schedulers are nil)
		Enabled:               true,
		VolumeAgeHoursSeconds: int((24 * time.Hour).Seconds()), // 86400s; field holds seconds despite its name
		ScanIntervalSeconds:   int((2 * time.Hour).Seconds()),
		MaxConcurrent:         1,
		ShardCount:            10, // classic 10+4 EC layout
		ParityCount:           4,
	}
	// Get current values from detector
	if ui.detector != nil {
		config.Enabled = ui.detector.IsEnabled()
		// NOTE(review): GetVolumeAgeHours is stored into a *Seconds field —
		// presumably the getter actually returns seconds; verify against the detector.
		config.VolumeAgeHoursSeconds = ui.detector.GetVolumeAgeHours()
		config.ScanIntervalSeconds = int(ui.detector.ScanInterval().Seconds())
	}
	// Get current values from scheduler
	if ui.scheduler != nil {
		config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
	}
	return config
}
// floatPtr returns a pointer to a copy of f, used for optional
// min/max bounds on numeric form fields.
func floatPtr(f float64) *float64 {
	v := f
	return &v
}
// RegisterUITempl registers the erasure coding templ UI provider with the UI registry,
// wiring it to the given detector and scheduler instances.
func RegisterUITempl(uiRegistry *types.UITemplRegistry, detector *EcDetector, scheduler *Scheduler) {
	uiProvider := NewUITemplProvider(detector, scheduler)
	uiRegistry.RegisterUI(uiProvider)
	glog.V(1).Infof("✅ Registered erasure coding task templ UI provider")
}

View File

@@ -0,0 +1,110 @@
package tasks
import (
"sync"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Process-wide singleton registries, created lazily by the GetGlobal*
// accessors below; each is guarded by its matching sync.Once.
var (
	globalRegistry      *TaskRegistry       // task factories (this package's registry)
	globalTypesRegistry *types.TaskRegistry // detectors/schedulers in the types package
	globalUIRegistry    *types.UIRegistry   // task UI providers
	registryOnce        sync.Once
	typesRegistryOnce   sync.Once
	uiRegistryOnce      sync.Once
)
// GetGlobalRegistry returns the global task registry (singleton).
// Safe for concurrent use; the registry is created on first call.
func GetGlobalRegistry() *TaskRegistry {
	registryOnce.Do(func() {
		globalRegistry = NewTaskRegistry()
		glog.V(1).Infof("Created global task registry")
	})
	return globalRegistry
}
// GetGlobalTypesRegistry returns the global types registry (singleton).
// Safe for concurrent use; the registry is created on first call.
func GetGlobalTypesRegistry() *types.TaskRegistry {
	typesRegistryOnce.Do(func() {
		globalTypesRegistry = types.NewTaskRegistry()
		glog.V(1).Infof("Created global types registry")
	})
	return globalTypesRegistry
}
// GetGlobalUIRegistry returns the global UI registry (singleton).
// Safe for concurrent use; the registry is created on first call.
func GetGlobalUIRegistry() *types.UIRegistry {
	uiRegistryOnce.Do(func() {
		globalUIRegistry = types.NewUIRegistry()
		glog.V(1).Infof("Created global UI registry")
	})
	return globalUIRegistry
}
// AutoRegister registers a task directly with the global registry.
// Intended to be called from a task package's init/registration code.
func AutoRegister(taskType types.TaskType, factory types.TaskFactory) {
	registry := GetGlobalRegistry()
	registry.Register(taskType, factory)
	glog.V(1).Infof("Auto-registered task type: %s", taskType)
}
// AutoRegisterTypes registers a task with the global types registry
// by handing the registry to the caller-supplied registration callback.
func AutoRegisterTypes(registerFunc func(*types.TaskRegistry)) {
	registry := GetGlobalTypesRegistry()
	registerFunc(registry)
	glog.V(1).Infof("Auto-registered task with types registry")
}
// AutoRegisterUI registers a UI provider with the global UI registry
// by handing the registry to the caller-supplied registration callback.
func AutoRegisterUI(registerFunc func(*types.UIRegistry)) {
	registry := GetGlobalUIRegistry()
	registerFunc(registry)
	glog.V(1).Infof("Auto-registered task UI provider")
}
// SetDefaultCapabilitiesFromRegistry sets the default worker capabilities
// based on all registered task types
func SetDefaultCapabilitiesFromRegistry() {
typesRegistry := GetGlobalTypesRegistry()
var capabilities []types.TaskType
for taskType := range typesRegistry.GetAllDetectors() {
capabilities = append(capabilities, taskType)
}
// Set the default capabilities in the types package
types.SetDefaultCapabilities(capabilities)
glog.V(1).Infof("Set default worker capabilities from registry: %v", capabilities)
}
// BuildMaintenancePolicyFromTasks creates a maintenance policy with default configurations
// from all registered tasks using their UI providers.
//
// NOTE(review): GetCurrentConfig returns the provider's *current* (possibly
// runtime-modified) values, not pristine defaults — confirm that is the
// intended seed for a new policy.
func BuildMaintenancePolicyFromTasks() *types.MaintenancePolicy {
	policy := types.NewMaintenancePolicy()
	// Get all registered task types from the UI registry
	uiRegistry := GetGlobalUIRegistry()
	for taskType, provider := range uiRegistry.GetAllProviders() {
		// Get the default configuration from the UI provider
		defaultConfig := provider.GetCurrentConfig()
		// Set the configuration in the policy
		policy.SetTaskConfig(taskType, defaultConfig)
		glog.V(3).Infof("Added default config for task type %s to policy", taskType)
	}
	glog.V(2).Infof("Built maintenance policy with %d task configurations", len(policy.TaskConfigs))
	return policy
}
// SetMaintenancePolicyFromTasks sets the default maintenance policy from registered tasks.
//
// NOTE(review): this is currently a stub — it only logs and performs no work.
// Callers that need the policy should use BuildMaintenancePolicyFromTasks.
func SetMaintenancePolicyFromTasks() {
	// This function can be called to initialize the policy from registered tasks
	// For now, we'll just log that this should be called by the integration layer
	glog.V(1).Infof("SetMaintenancePolicyFromTasks called - policy should be built by the integration layer")
}

252
weed/worker/tasks/task.go Normal file
View File

@@ -0,0 +1,252 @@
package tasks
import (
"context"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// BaseTask provides common functionality for all tasks:
// thread-safe progress reporting, cooperative cancellation, and
// start-time/estimated-duration bookkeeping. Concrete tasks embed it.
type BaseTask struct {
	taskType          types.TaskType // immutable after construction
	progress          float64        // 0.0 to 100.0, guarded by mutex
	cancelled         bool           // sticky cancel flag, guarded by mutex
	mutex             sync.RWMutex   // guards all mutable fields above/below
	startTime         time.Time
	estimatedDuration time.Duration
}
// NewBaseTask creates a new base task
// of the given type with zero progress and not cancelled.
func NewBaseTask(taskType types.TaskType) *BaseTask {
	return &BaseTask{
		taskType:  taskType,
		progress:  0.0,
		cancelled: false,
	}
}
// Type returns the task type
func (t *BaseTask) Type() types.TaskType {
	return t.taskType
}
// GetProgress returns the current progress (0.0 to 100.0)
func (t *BaseTask) GetProgress() float64 {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.progress
}
// SetProgress sets the current progress,
// clamping the value into the [0, 100] range.
func (t *BaseTask) SetProgress(progress float64) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	if progress < 0 {
		progress = 0
	}
	if progress > 100 {
		progress = 100
	}
	t.progress = progress
}
// Cancel cancels the task. The flag is sticky: there is no way to
// un-cancel. Always returns nil.
func (t *BaseTask) Cancel() error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.cancelled = true
	return nil
}
// IsCancelled returns whether the task is cancelled
func (t *BaseTask) IsCancelled() bool {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.cancelled
}
// SetStartTime sets the task start time
func (t *BaseTask) SetStartTime(startTime time.Time) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.startTime = startTime
}
// GetStartTime returns the task start time
func (t *BaseTask) GetStartTime() time.Time {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.startTime
}
// SetEstimatedDuration sets the estimated duration
func (t *BaseTask) SetEstimatedDuration(duration time.Duration) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.estimatedDuration = duration
}
// GetEstimatedDuration returns the estimated duration
func (t *BaseTask) GetEstimatedDuration() time.Duration {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.estimatedDuration
}
// ExecuteTask is a wrapper that handles common task execution logic: it
// records the start time, resets progress, watches for cooperative
// cancellation (via Cancel) while the executor runs, and marks the task
// 100% complete on success.
//
// Cancellation is polled once per second; when detected, the derived context
// is cancelled so a context-aware executor can stop early. If the executor
// returns nil but the task was cancelled, context.Canceled is returned.
func (t *BaseTask) ExecuteTask(ctx context.Context, params types.TaskParams, executor func(context.Context, types.TaskParams) error) error {
	t.SetStartTime(time.Now())
	t.SetProgress(0)
	// Create a context that can be cancelled
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Monitor for cancellation. A single Ticker is reused instead of
	// allocating a fresh timer via time.After on every poll iteration; the
	// goroutine exits when the executor finishes (the deferred cancel fires).
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				if t.IsCancelled() {
					cancel()
					return
				}
			}
		}
	}()
	// Execute the actual task
	if err := executor(ctx, params); err != nil {
		return err
	}
	if t.IsCancelled() {
		return context.Canceled
	}
	t.SetProgress(100)
	return nil
}
// TaskRegistry manages task factories,
// mapping each task type to the factory that creates its instances.
// All methods are safe for concurrent use.
type TaskRegistry struct {
	factories map[types.TaskType]types.TaskFactory // guarded by mutex
	mutex     sync.RWMutex
}
// NewTaskRegistry creates a new task registry
// with an empty factory table.
func NewTaskRegistry() *TaskRegistry {
	return &TaskRegistry{
		factories: make(map[types.TaskType]types.TaskFactory),
	}
}
// Register registers a task factory.
// Registering the same task type again silently replaces the previous factory.
func (r *TaskRegistry) Register(taskType types.TaskType, factory types.TaskFactory) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.factories[taskType] = factory
}
// CreateTask instantiates a task of the given type via its registered
// factory, returning an *UnsupportedTaskTypeError when none is registered.
// The lock is released before invoking the factory so a slow Create does
// not block other registry operations.
func (r *TaskRegistry) CreateTask(taskType types.TaskType, params types.TaskParams) (types.TaskInterface, error) {
	r.mutex.RLock()
	factory, ok := r.factories[taskType]
	r.mutex.RUnlock()

	if !ok {
		return nil, &UnsupportedTaskTypeError{TaskType: taskType}
	}
	return factory.Create(params)
}
// GetSupportedTypes returns all task types that currently have a registered
// factory. Order is unspecified (map iteration order).
func (r *TaskRegistry) GetSupportedTypes() []types.TaskType {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	// Named taskTypes (not "types") to avoid shadowing the imported
	// types package; also preallocated to the known length.
	taskTypes := make([]types.TaskType, 0, len(r.factories))
	for taskType := range r.factories {
		taskTypes = append(taskTypes, taskType)
	}
	return taskTypes
}
// GetFactory returns the factory for a task type,
// with a boolean reporting whether one is registered.
func (r *TaskRegistry) GetFactory(taskType types.TaskType) (types.TaskFactory, bool) {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	factory, exists := r.factories[taskType]
	return factory, exists
}
// UnsupportedTaskTypeError represents an error for unsupported task types,
// i.e. a CreateTask call for a type with no registered factory.
type UnsupportedTaskTypeError struct {
	TaskType types.TaskType
}
// Error implements the error interface.
func (e *UnsupportedTaskTypeError) Error() string {
	return "unsupported task type: " + string(e.TaskType)
}
// BaseTaskFactory provides common functionality for task factories.
// Concrete factories embed it and supply the Create method themselves.
type BaseTaskFactory struct {
	// taskType is not read by the methods defined here; presumably it is
	// used by embedding factories — TODO confirm.
	taskType     types.TaskType
	capabilities []string // worker capabilities required to run this task type
	description  string   // human-readable description of the task type
}
// NewBaseTaskFactory creates a new base task factory
// holding the shared metadata for a task type.
func NewBaseTaskFactory(taskType types.TaskType, capabilities []string, description string) *BaseTaskFactory {
	return &BaseTaskFactory{
		taskType:     taskType,
		capabilities: capabilities,
		description:  description,
	}
}
// Capabilities returns the capabilities required for this task type
func (f *BaseTaskFactory) Capabilities() []string {
	return f.capabilities
}
// Description returns the description of this task type
func (f *BaseTaskFactory) Description() string {
	return f.description
}
// ValidateParams validates task parameters, checking each requested field
// name ("volume_id", "server", "collection") for a non-zero value. The first
// missing field produces a *ValidationError; unknown field names are ignored.
func ValidateParams(params types.TaskParams, requiredFields ...string) error {
	for _, field := range requiredFields {
		var missing bool
		switch field {
		case "volume_id":
			missing = params.VolumeID == 0
		case "server":
			missing = params.Server == ""
		case "collection":
			missing = params.Collection == ""
		}
		if missing {
			return &ValidationError{Field: field, Message: field + " is required"}
		}
	}
	return nil
}
// ValidationError reports a task parameter that failed validation.
type ValidationError struct {
	Field   string // name of the offending parameter
	Message string // human-readable explanation
}

// Error implements the error interface, formatting as "<field>: <message>".
func (e *ValidationError) Error() string {
	msg := e.Field
	msg += ": "
	msg += e.Message
	return msg
}

View File

@@ -0,0 +1,314 @@
package vacuum
import (
"fmt"
"html/template"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// UIProvider provides the UI for vacuum task configuration,
// bridging the admin web form to the live detector and scheduler.
type UIProvider struct {
	detector  *VacuumDetector  // detection settings target; may be nil
	scheduler *VacuumScheduler // scheduling settings target; may be nil
}
// NewUIProvider creates a new vacuum UI provider
// bound to the given detector and scheduler (either may be nil).
func NewUIProvider(detector *VacuumDetector, scheduler *VacuumScheduler) *UIProvider {
	return &UIProvider{
		detector:  detector,
		scheduler: scheduler,
	}
}
// GetTaskType returns the task type
func (ui *UIProvider) GetTaskType() types.TaskType {
	return types.TaskTypeVacuum
}
// GetDisplayName returns the human-readable name
func (ui *UIProvider) GetDisplayName() string {
	return "Volume Vacuum"
}
// GetDescription returns a description of what this task does
func (ui *UIProvider) GetDescription() string {
	return "Reclaims disk space by removing deleted files from volumes"
}
// GetIcon returns the icon CSS class for this task type
func (ui *UIProvider) GetIcon() string {
	return "fas fa-broom text-primary"
}
// VacuumConfig represents the vacuum configuration.
// All durations are persisted as whole seconds.
type VacuumConfig struct {
	// Enabled toggles automatic creation of vacuum tasks.
	Enabled bool `json:"enabled"`
	// GarbageThreshold is the garbage ratio (0.0-1.0) that triggers a vacuum.
	GarbageThreshold float64 `json:"garbage_threshold"`
	// ScanIntervalSeconds is how often volumes are scanned for vacuum candidates.
	ScanIntervalSeconds int `json:"scan_interval_seconds"`
	// MaxConcurrent caps simultaneously running vacuum tasks.
	MaxConcurrent int `json:"max_concurrent"`
	// MinVolumeAgeSeconds excludes volumes younger than this age.
	MinVolumeAgeSeconds int `json:"min_volume_age_seconds"`
	// MinIntervalSeconds is the minimum gap between vacuums of the same volume.
	MinIntervalSeconds int `json:"min_interval_seconds"`
}
// Helper functions for duration conversion
func secondsToDuration(seconds int) time.Duration {
return time.Duration(seconds) * time.Second
}
func durationToSeconds(d time.Duration) int {
return int(d.Seconds())
}
// formatDurationForUser formats a seconds count as a compact, human-friendly
// duration string: "45s", "5m", "1.5h", or "2.0d" depending on magnitude.
func formatDurationForUser(seconds int) string {
	d := time.Duration(seconds) * time.Second
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%ds", seconds)
	case d < time.Hour:
		return fmt.Sprintf("%.0fm", d.Minutes())
	case d < 24*time.Hour:
		return fmt.Sprintf("%.1fh", d.Hours())
	default:
		return fmt.Sprintf("%.1fd", d.Hours()/24)
	}
}
// RenderConfigForm renders the configuration form HTML for the vacuum task.
// Fields are assembled with the shared FormBuilder and wrapped in a Bootstrap
// card, followed by a small inline script that resets inputs to defaults.
//
// NOTE(review): currentConfig is ignored — values are read live from the
// detector/scheduler via getCurrentVacuumConfig. Also, the single card is
// titled "Detection Settings" but form.Build() emits ALL fields, including
// the scheduling ones; confirm whether separate cards were intended.
func (ui *UIProvider) RenderConfigForm(currentConfig interface{}) (template.HTML, error) {
	config := ui.getCurrentVacuumConfig()
	// Build form using the FormBuilder helper
	form := types.NewFormBuilder()
	// Detection Settings
	form.AddCheckboxField(
		"enabled",
		"Enable Vacuum Tasks",
		"Whether vacuum tasks should be automatically created",
		config.Enabled,
	)
	form.AddNumberField(
		"garbage_threshold",
		"Garbage Threshold (%)",
		"Trigger vacuum when garbage ratio exceeds this percentage (0.0-1.0)",
		config.GarbageThreshold,
		true,
	)
	form.AddDurationField(
		"scan_interval",
		"Scan Interval",
		"How often to scan for volumes needing vacuum",
		secondsToDuration(config.ScanIntervalSeconds),
		true,
	)
	form.AddDurationField(
		"min_volume_age",
		"Minimum Volume Age",
		"Only vacuum volumes older than this duration",
		secondsToDuration(config.MinVolumeAgeSeconds),
		true,
	)
	// Scheduling Settings
	form.AddNumberField(
		"max_concurrent",
		"Max Concurrent Tasks",
		"Maximum number of vacuum tasks that can run simultaneously",
		float64(config.MaxConcurrent),
		true,
	)
	form.AddDurationField(
		"min_interval",
		"Minimum Interval",
		"Minimum time between vacuum operations on the same volume",
		secondsToDuration(config.MinIntervalSeconds),
		true,
	)
	// Generate organized form sections using Bootstrap components.
	// The resetForm script fills duration inputs with Go duration strings
	// (e.g. "30m"), matching what ParseConfigForm expects.
	html := `
<div class="row">
	<div class="col-12">
		<div class="card mb-4">
			<div class="card-header">
				<h5 class="mb-0">
					<i class="fas fa-search me-2"></i>
					Detection Settings
				</h5>
			</div>
			<div class="card-body">
` + string(form.Build()) + `
			</div>
		</div>
	</div>
</div>
<script>
function resetForm() {
	if (confirm('Reset all vacuum settings to defaults?')) {
		// Reset to default values
		document.querySelector('input[name="enabled"]').checked = true;
		document.querySelector('input[name="garbage_threshold"]').value = '0.3';
		document.querySelector('input[name="scan_interval"]').value = '30m';
		document.querySelector('input[name="min_volume_age"]').value = '1h';
		document.querySelector('input[name="max_concurrent"]').value = '2';
		document.querySelector('input[name="min_interval"]').value = '6h';
	}
}
</script>
`
	return template.HTML(html), nil
}
// ParseConfigForm converts submitted form values into a *VacuumConfig.
// Duration fields are Go duration strings (e.g. "30m") and are stored as
// whole seconds. Fields absent from the form keep their zero value.
func (ui *UIProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
	config := &VacuumConfig{}

	// Checkbox: present and "on" means enabled.
	config.Enabled = len(formData["enabled"]) > 0 && formData["enabled"][0] == "on"

	// Garbage threshold: float in [0, 1].
	if values := formData["garbage_threshold"]; len(values) > 0 {
		threshold, err := strconv.ParseFloat(values[0], 64)
		if err != nil {
			return nil, fmt.Errorf("invalid garbage threshold: %v", err)
		}
		if threshold < 0 || threshold > 1 {
			return nil, fmt.Errorf("garbage threshold must be between 0.0 and 1.0")
		}
		config.GarbageThreshold = threshold
	}

	// Scan interval: Go duration string, stored as seconds.
	if values := formData["scan_interval"]; len(values) > 0 {
		interval, err := time.ParseDuration(values[0])
		if err != nil {
			return nil, fmt.Errorf("invalid scan interval: %v", err)
		}
		config.ScanIntervalSeconds = durationToSeconds(interval)
	}

	// Minimum volume age: Go duration string, stored as seconds.
	if values := formData["min_volume_age"]; len(values) > 0 {
		age, err := time.ParseDuration(values[0])
		if err != nil {
			return nil, fmt.Errorf("invalid min volume age: %v", err)
		}
		config.MinVolumeAgeSeconds = durationToSeconds(age)
	}

	// Max concurrent: positive integer.
	if values := formData["max_concurrent"]; len(values) > 0 {
		concurrent, err := strconv.Atoi(values[0])
		if err != nil {
			return nil, fmt.Errorf("invalid max concurrent: %v", err)
		}
		if concurrent < 1 {
			return nil, fmt.Errorf("max concurrent must be at least 1")
		}
		config.MaxConcurrent = concurrent
	}

	// Minimum interval: Go duration string, stored as seconds.
	if values := formData["min_interval"]; len(values) > 0 {
		interval, err := time.ParseDuration(values[0])
		if err != nil {
			return nil, fmt.Errorf("invalid min interval: %v", err)
		}
		config.MinIntervalSeconds = durationToSeconds(interval)
	}

	return config, nil
}
// GetCurrentConfig returns the current configuration
// as a *VacuumConfig, read live from the detector and scheduler.
func (ui *UIProvider) GetCurrentConfig() interface{} {
	return ui.getCurrentVacuumConfig()
}
// ApplyConfig applies the new configuration
// to the live detector and scheduler. Either receiver may be nil, in which
// case its portion of the config is silently skipped.
func (ui *UIProvider) ApplyConfig(config interface{}) error {
	vacuumConfig, ok := config.(*VacuumConfig)
	if !ok {
		return fmt.Errorf("invalid config type, expected *VacuumConfig")
	}
	// Apply to detector
	if ui.detector != nil {
		ui.detector.SetEnabled(vacuumConfig.Enabled)
		ui.detector.SetGarbageThreshold(vacuumConfig.GarbageThreshold)
		ui.detector.SetScanInterval(secondsToDuration(vacuumConfig.ScanIntervalSeconds))
		ui.detector.SetMinVolumeAge(secondsToDuration(vacuumConfig.MinVolumeAgeSeconds))
	}
	// Apply to scheduler
	if ui.scheduler != nil {
		ui.scheduler.SetEnabled(vacuumConfig.Enabled)
		ui.scheduler.SetMaxConcurrent(vacuumConfig.MaxConcurrent)
		ui.scheduler.SetMinInterval(secondsToDuration(vacuumConfig.MinIntervalSeconds))
	}
	glog.V(1).Infof("Applied vacuum configuration: enabled=%v, threshold=%.1f%%, scan_interval=%s, max_concurrent=%d",
		vacuumConfig.Enabled, vacuumConfig.GarbageThreshold*100, formatDurationForUser(vacuumConfig.ScanIntervalSeconds), vacuumConfig.MaxConcurrent)
	return nil
}
// getCurrentVacuumConfig gets the current configuration from detector and scheduler.
// Hard-coded fallback defaults are used for any component that is nil.
func (ui *UIProvider) getCurrentVacuumConfig() *VacuumConfig {
	config := &VacuumConfig{
		// Default values (fallback if detectors/schedulers are nil)
		Enabled:             true,
		GarbageThreshold:    0.3,         // vacuum at 30% garbage
		ScanIntervalSeconds: 30 * 60,     // 30 minutes
		MinVolumeAgeSeconds: 1 * 60 * 60, // 1 hour
		MaxConcurrent:       2,
		MinIntervalSeconds:  6 * 60 * 60, // 6 hours between vacuums of a volume
	}
	// Get current values from detector
	if ui.detector != nil {
		config.Enabled = ui.detector.IsEnabled()
		config.GarbageThreshold = ui.detector.GetGarbageThreshold()
		config.ScanIntervalSeconds = durationToSeconds(ui.detector.ScanInterval())
		config.MinVolumeAgeSeconds = durationToSeconds(ui.detector.GetMinVolumeAge())
	}
	// Get current values from scheduler
	if ui.scheduler != nil {
		config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
		config.MinIntervalSeconds = durationToSeconds(ui.scheduler.GetMinInterval())
	}
	return config
}
// RegisterUI wires a vacuum UIProvider for the given detector/scheduler
// pair into the UI registry.
func RegisterUI(uiRegistry *types.UIRegistry, detector *VacuumDetector, scheduler *VacuumScheduler) {
	uiRegistry.RegisterUI(NewUIProvider(detector, scheduler))
	glog.V(1).Infof("✅ Registered vacuum task UI provider")
}

// GetUIProvider looks up the registered vacuum provider and returns it as a
// concrete *UIProvider, or nil if it is absent or of an unexpected type.
// Example of retrieving the provider for external use.
func GetUIProvider(uiRegistry *types.UIRegistry) *UIProvider {
	if vacuumProvider, ok := uiRegistry.GetProvider(types.TaskTypeVacuum).(*UIProvider); ok {
		return vacuumProvider
	}
	return nil
}

View File

@@ -0,0 +1,330 @@
package vacuum
import (
"fmt"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// formatDurationFromSeconds renders a seconds count using Go's standard
// duration notation (e.g. 90 -> "1m30s").
func formatDurationFromSeconds(seconds int) string {
	return (time.Duration(seconds) * time.Second).String()
}
// secondsToValueAndUnit converts a seconds count to the largest unit
// ("days", "hours", "minutes") that represents it exactly, for the value+unit
// duration widgets. Anything not exactly divisible falls back to (possibly
// fractional) minutes; zero is reported as 0 minutes.
func secondsToValueAndUnit(seconds int) (float64, string) {
	if seconds == 0 {
		return 0, "minutes"
	}
	// Try days first
	if seconds%(24*3600) == 0 && seconds >= 24*3600 {
		return float64(seconds / (24 * 3600)), "days"
	}
	// Try hours
	if seconds%3600 == 0 && seconds >= 3600 {
		return float64(seconds / 3600), "hours"
	}
	// Default to minutes. Use float division so values that are not a whole
	// number of minutes (e.g. 90s -> 1.5m) are not silently truncated.
	return float64(seconds) / 60, "minutes"
}
// valueAndUnitToSeconds converts a numeric value with a unit selector
// ("days", "hours", "minutes") into whole seconds. Unrecognized units are
// treated as minutes.
func valueAndUnitToSeconds(value float64, unit string) int {
	switch unit {
	case "days":
		return int(value * 24 * 3600)
	case "hours":
		return int(value * 3600)
	default: // "minutes" and anything unrecognized
		return int(value * 60)
	}
}
// UITemplProvider provides the templ-based UI for vacuum task configuration,
// rendering structured ConfigSectionData instead of raw HTML.
type UITemplProvider struct {
	detector  *VacuumDetector  // detection settings target; may be nil
	scheduler *VacuumScheduler // scheduling settings target; may be nil
}
// NewUITemplProvider creates a new vacuum templ UI provider
// bound to the given detector and scheduler (either may be nil).
func NewUITemplProvider(detector *VacuumDetector, scheduler *VacuumScheduler) *UITemplProvider {
	return &UITemplProvider{
		detector:  detector,
		scheduler: scheduler,
	}
}
// GetTaskType returns the task type
func (ui *UITemplProvider) GetTaskType() types.TaskType {
	return types.TaskTypeVacuum
}
// GetDisplayName returns the human-readable name
func (ui *UITemplProvider) GetDisplayName() string {
	return "Volume Vacuum"
}
// GetDescription returns a description of what this task does
func (ui *UITemplProvider) GetDescription() string {
	return "Reclaims disk space by removing deleted files from volumes"
}
// GetIcon returns the icon CSS class for this task type
func (ui *UITemplProvider) GetIcon() string {
	return "fas fa-broom text-primary"
}
// RenderConfigSections renders the configuration as templ section data:
// three Bootstrap card sections (detection, scheduling, and a read-only
// performance note) built from the live detector/scheduler values.
//
// NOTE(review): currentConfig is ignored — values come from
// getCurrentVacuumConfig instead; confirm that is intended.
func (ui *UITemplProvider) RenderConfigSections(currentConfig interface{}) ([]components.ConfigSectionData, error) {
	config := ui.getCurrentVacuumConfig()
	// Detection settings section
	detectionSection := components.ConfigSectionData{
		Title:       "Detection Settings",
		Icon:        "fas fa-search",
		Description: "Configure when vacuum tasks should be triggered",
		Fields: []interface{}{
			components.CheckboxFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "enabled",
					Label:       "Enable Vacuum Tasks",
					Description: "Whether vacuum tasks should be automatically created",
				},
				Checked: config.Enabled,
			},
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "garbage_threshold",
					Label:       "Garbage Threshold",
					Description: "Trigger vacuum when garbage ratio exceeds this percentage (0.0-1.0)",
					Required:    true,
				},
				Value: config.GarbageThreshold,
				Step:  "0.01",
				Min:   floatPtr(0.0),
				Max:   floatPtr(1.0),
			},
			// Duration fields are edited as value+unit pairs; stored in seconds.
			components.DurationInputFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "scan_interval",
					Label:       "Scan Interval",
					Description: "How often to scan for volumes needing vacuum",
					Required:    true,
				},
				Seconds: config.ScanIntervalSeconds,
			},
			components.DurationInputFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "min_volume_age",
					Label:       "Minimum Volume Age",
					Description: "Only vacuum volumes older than this duration",
					Required:    true,
				},
				Seconds: config.MinVolumeAgeSeconds,
			},
		},
	}
	// Scheduling settings section
	schedulingSection := components.ConfigSectionData{
		Title:       "Scheduling Settings",
		Icon:        "fas fa-clock",
		Description: "Configure task scheduling and concurrency",
		Fields: []interface{}{
			components.NumberFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "max_concurrent",
					Label:       "Max Concurrent Tasks",
					Description: "Maximum number of vacuum tasks that can run simultaneously",
					Required:    true,
				},
				Value: float64(config.MaxConcurrent),
				Step:  "1",
				Min:   floatPtr(1),
			},
			components.DurationInputFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "min_interval",
					Label:       "Minimum Interval",
					Description: "Minimum time between vacuum operations on the same volume",
					Required:    true,
				},
				Seconds: config.MinIntervalSeconds,
			},
		},
	}
	// Performance impact info section (informational only; not parsed back)
	performanceSection := components.ConfigSectionData{
		Title:       "Performance Impact",
		Icon:        "fas fa-exclamation-triangle",
		Description: "Important information about vacuum operations",
		Fields: []interface{}{
			components.TextFieldData{
				FormFieldData: components.FormFieldData{
					Name:        "info_impact",
					Label:       "Impact",
					Description: "Volume vacuum operations are I/O intensive and should be scheduled appropriately",
				},
				Value: "Configure thresholds and intervals based on your storage usage patterns",
			},
		},
	}
	return []components.ConfigSectionData{detectionSection, schedulingSection, performanceSection}, nil
}
// ParseConfigForm parses form data into configuration (*VacuumConfig).
// Duration fields arrive as a numeric value plus a companion unit selector
// (e.g. "scan_interval" + "scan_interval_unit") and are stored as seconds.
// Fields absent from the form keep the config's zero value.
func (ui *UITemplProvider) ParseConfigForm(formData map[string][]string) (interface{}, error) {
	config := &VacuumConfig{}
	// Parse enabled checkbox: present and "on" means enabled
	config.Enabled = len(formData["enabled"]) > 0 && formData["enabled"][0] == "on"
	// Parse garbage threshold (float in [0, 1])
	if thresholdStr := formData["garbage_threshold"]; len(thresholdStr) > 0 {
		if threshold, err := strconv.ParseFloat(thresholdStr[0], 64); err != nil {
			return nil, fmt.Errorf("invalid garbage threshold: %v", err)
		} else if threshold < 0 || threshold > 1 {
			return nil, fmt.Errorf("garbage threshold must be between 0.0 and 1.0")
		} else {
			config.GarbageThreshold = threshold
		}
	}
	// Parse scan interval (value + unit -> seconds)
	if valueStr := formData["scan_interval"]; len(valueStr) > 0 {
		if value, err := strconv.ParseFloat(valueStr[0], 64); err != nil {
			return nil, fmt.Errorf("invalid scan interval value: %v", err)
		} else {
			unit := "minutes" // default
			if unitStr := formData["scan_interval_unit"]; len(unitStr) > 0 {
				unit = unitStr[0]
			}
			config.ScanIntervalSeconds = valueAndUnitToSeconds(value, unit)
		}
	}
	// Parse min volume age (value + unit -> seconds)
	if valueStr := formData["min_volume_age"]; len(valueStr) > 0 {
		if value, err := strconv.ParseFloat(valueStr[0], 64); err != nil {
			return nil, fmt.Errorf("invalid min volume age value: %v", err)
		} else {
			unit := "minutes" // default
			if unitStr := formData["min_volume_age_unit"]; len(unitStr) > 0 {
				unit = unitStr[0]
			}
			config.MinVolumeAgeSeconds = valueAndUnitToSeconds(value, unit)
		}
	}
	// Parse max concurrent (positive integer)
	if concurrentStr := formData["max_concurrent"]; len(concurrentStr) > 0 {
		if concurrent, err := strconv.Atoi(concurrentStr[0]); err != nil {
			return nil, fmt.Errorf("invalid max concurrent: %v", err)
		} else if concurrent < 1 {
			return nil, fmt.Errorf("max concurrent must be at least 1")
		} else {
			config.MaxConcurrent = concurrent
		}
	}
	// Parse min interval (value + unit -> seconds)
	if valueStr := formData["min_interval"]; len(valueStr) > 0 {
		if value, err := strconv.ParseFloat(valueStr[0], 64); err != nil {
			return nil, fmt.Errorf("invalid min interval value: %v", err)
		} else {
			unit := "minutes" // default
			if unitStr := formData["min_interval_unit"]; len(unitStr) > 0 {
				unit = unitStr[0]
			}
			config.MinIntervalSeconds = valueAndUnitToSeconds(value, unit)
		}
	}
	return config, nil
}
// GetCurrentConfig returns the current configuration
// as a *VacuumConfig, read live from the detector and scheduler.
func (ui *UITemplProvider) GetCurrentConfig() interface{} {
	return ui.getCurrentVacuumConfig()
}
// ApplyConfig applies the new configuration
// to the live detector and scheduler. Either receiver may be nil, in which
// case its portion of the config is silently skipped.
func (ui *UITemplProvider) ApplyConfig(config interface{}) error {
	vacuumConfig, ok := config.(*VacuumConfig)
	if !ok {
		return fmt.Errorf("invalid config type, expected *VacuumConfig")
	}
	// Apply to detector
	if ui.detector != nil {
		ui.detector.SetEnabled(vacuumConfig.Enabled)
		ui.detector.SetGarbageThreshold(vacuumConfig.GarbageThreshold)
		ui.detector.SetScanInterval(time.Duration(vacuumConfig.ScanIntervalSeconds) * time.Second)
		ui.detector.SetMinVolumeAge(time.Duration(vacuumConfig.MinVolumeAgeSeconds) * time.Second)
	}
	// Apply to scheduler
	if ui.scheduler != nil {
		ui.scheduler.SetEnabled(vacuumConfig.Enabled)
		ui.scheduler.SetMaxConcurrent(vacuumConfig.MaxConcurrent)
		ui.scheduler.SetMinInterval(time.Duration(vacuumConfig.MinIntervalSeconds) * time.Second)
	}
	glog.V(1).Infof("Applied vacuum configuration: enabled=%v, threshold=%.1f%%, scan_interval=%s, max_concurrent=%d",
		vacuumConfig.Enabled, vacuumConfig.GarbageThreshold*100, formatDurationFromSeconds(vacuumConfig.ScanIntervalSeconds), vacuumConfig.MaxConcurrent)
	return nil
}
// getCurrentVacuumConfig gets the current configuration from detector and scheduler.
// Hard-coded fallback defaults are used for any component that is nil.
func (ui *UITemplProvider) getCurrentVacuumConfig() *VacuumConfig {
	config := &VacuumConfig{
		// Default values (fallback if detectors/schedulers are nil)
		Enabled:             true,
		GarbageThreshold:    0.3, // vacuum at 30% garbage
		ScanIntervalSeconds: int((30 * time.Minute).Seconds()),
		MinVolumeAgeSeconds: int((1 * time.Hour).Seconds()),
		MaxConcurrent:       2,
		MinIntervalSeconds:  int((6 * time.Hour).Seconds()),
	}
	// Get current values from detector
	if ui.detector != nil {
		config.Enabled = ui.detector.IsEnabled()
		config.GarbageThreshold = ui.detector.GetGarbageThreshold()
		config.ScanIntervalSeconds = int(ui.detector.ScanInterval().Seconds())
		config.MinVolumeAgeSeconds = int(ui.detector.GetMinVolumeAge().Seconds())
	}
	// Get current values from scheduler
	if ui.scheduler != nil {
		config.MaxConcurrent = ui.scheduler.GetMaxConcurrent()
		config.MinIntervalSeconds = int(ui.scheduler.GetMinInterval().Seconds())
	}
	return config
}
// floatPtr returns a pointer to a copy of f, used for optional
// min/max bounds on numeric form fields.
func floatPtr(f float64) *float64 {
	v := f
	return &v
}
// RegisterUITempl registers the vacuum templ UI provider with the UI registry,
// wiring it to the given detector and scheduler instances.
func RegisterUITempl(uiRegistry *types.UITemplRegistry, detector *VacuumDetector, scheduler *VacuumScheduler) {
	uiProvider := NewUITemplProvider(detector, scheduler)
	uiRegistry.RegisterUI(uiProvider)
	glog.V(1).Infof("✅ Registered vacuum task templ UI provider")
}

View File

@@ -0,0 +1,79 @@
package vacuum
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Task implements vacuum operation to reclaim disk space.
// It embeds BaseTask for progress and cancellation plumbing.
type Task struct {
	*tasks.BaseTask        // progress/cancellation support
	server          string // volume server hosting the target volume
	volumeID        uint32 // volume to vacuum
}
// NewTask creates a new vacuum task instance
// for the given volume on the given server.
func NewTask(server string, volumeID uint32) *Task {
	task := &Task{
		BaseTask: tasks.NewBaseTask(types.TaskTypeVacuum),
		server:   server,
		volumeID: volumeID,
	}
	return task
}
// Execute executes the vacuum task.
//
// NOTE(review): this is a simulation placeholder — it sleeps through four
// fixed steps (~7s total) while updating progress, performs no RPC against
// the volume server, and ignores params entirely. It honors cancellation
// between steps.
func (t *Task) Execute(params types.TaskParams) error {
	glog.Infof("Starting vacuum task for volume %d on server %s", t.volumeID, t.server)
	// Simulate vacuum operation with progress updates
	steps := []struct {
		name     string
		duration time.Duration
		progress float64
	}{
		{"Scanning volume", 1 * time.Second, 20},
		{"Identifying deleted files", 2 * time.Second, 50},
		{"Compacting data", 3 * time.Second, 80},
		{"Finalizing vacuum", 1 * time.Second, 100},
	}
	for _, step := range steps {
		// Cancellation is only observed at step boundaries.
		if t.IsCancelled() {
			return fmt.Errorf("vacuum task cancelled")
		}
		glog.V(1).Infof("Vacuum task step: %s", step.name)
		t.SetProgress(step.progress)
		// Simulate work
		time.Sleep(step.duration)
	}
	glog.Infof("Vacuum task completed for volume %d on server %s", t.volumeID, t.server)
	return nil
}
// Validate checks that the parameters carry the required volume ID and server.
func (t *Task) Validate(params types.TaskParams) error {
	switch {
	case params.VolumeID == 0:
		return fmt.Errorf("volume_id is required")
	case params.Server == "":
		return fmt.Errorf("server is required")
	default:
		return nil
	}
}
// EstimateTime returns a rough duration estimate for one vacuum run.
// It is a fixed base time today; volume size or usage patterns could be
// factored in later.
func (t *Task) EstimateTime(params types.TaskParams) time.Duration {
	const baseTime = 25 * time.Second
	return baseTime
}

View File

@@ -0,0 +1,132 @@
package vacuum
import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// VacuumDetector implements vacuum task detection using code instead of schemas
type VacuumDetector struct {
	enabled          bool          // whether scanning is active
	garbageThreshold float64       // minimum garbage ratio (0-1) for a volume to qualify
	minVolumeAge     time.Duration // volumes younger than this are skipped
	scanInterval     time.Duration // how often ScanForTasks should be invoked
}
// Compile-time interface assertions
// (build fails if VacuumDetector drifts from either interface contract)
var (
	_ types.TaskDetector = (*VacuumDetector)(nil)
	_ types.PolicyConfigurableDetector = (*VacuumDetector)(nil)
)
// NewVacuumDetector returns a detector preloaded with defaults: enabled,
// 30% garbage threshold, 24h minimum volume age, 30m scan interval.
func NewVacuumDetector() *VacuumDetector {
	d := &VacuumDetector{}
	d.enabled = true
	d.garbageThreshold = 0.3
	d.minVolumeAge = 24 * time.Hour
	d.scanInterval = 30 * time.Minute
	return d
}
// GetTaskType reports the task type this detector produces.
func (d *VacuumDetector) GetTaskType() types.TaskType {
	return types.TaskTypeVacuum
}
// ScanForTasks inspects volume health metrics and emits a vacuum task for
// every volume whose garbage ratio and age both exceed the configured
// thresholds. Returns nil when the detector is disabled.
func (d *VacuumDetector) ScanForTasks(volumeMetrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo) ([]*types.TaskDetectionResult, error) {
	if !d.enabled {
		return nil, nil
	}

	var results []*types.TaskDetectionResult
	for _, m := range volumeMetrics {
		if m.GarbageRatio < d.garbageThreshold || m.Age < d.minVolumeAge {
			continue
		}

		// Volumes carrying a lot of garbage jump the queue.
		priority := types.TaskPriorityNormal
		if m.GarbageRatio > 0.6 {
			priority = types.TaskPriorityHigh
		}

		results = append(results, &types.TaskDetectionResult{
			TaskType:   types.TaskTypeVacuum,
			VolumeID:   m.VolumeID,
			Server:     m.Server,
			Collection: m.Collection,
			Priority:   priority,
			Reason:     "Volume has excessive garbage requiring vacuum",
			Parameters: map[string]interface{}{
				"garbage_ratio": m.GarbageRatio,
				"volume_age":    m.Age.String(),
			},
			ScheduleAt: time.Now(),
		})
	}

	glog.V(2).Infof("Vacuum detector found %d volumes needing vacuum", len(results))
	return results, nil
}
// ScanInterval reports how frequently this detector wants to be run.
func (d *VacuumDetector) ScanInterval() time.Duration {
	return d.scanInterval
}

// IsEnabled reports whether this detector is currently active.
func (d *VacuumDetector) IsEnabled() bool {
	return d.enabled
}
// Configuration setters

// SetEnabled toggles whether the detector scans at all.
func (d *VacuumDetector) SetEnabled(enabled bool) {
	d.enabled = enabled
}

// SetGarbageThreshold sets the minimum garbage ratio that triggers a vacuum.
func (d *VacuumDetector) SetGarbageThreshold(threshold float64) {
	d.garbageThreshold = threshold
}

// SetScanInterval sets how often the detector should scan.
func (d *VacuumDetector) SetScanInterval(interval time.Duration) {
	d.scanInterval = interval
}

// SetMinVolumeAge sets the minimum age a volume must reach before vacuuming.
func (d *VacuumDetector) SetMinVolumeAge(age time.Duration) {
	d.minVolumeAge = age
}
// GetGarbageThreshold returns the configured garbage-ratio trigger.
func (d *VacuumDetector) GetGarbageThreshold() float64 {
	return d.garbageThreshold
}

// GetMinVolumeAge returns the minimum age a volume must reach before vacuuming.
func (d *VacuumDetector) GetMinVolumeAge() time.Duration {
	return d.minVolumeAge
}

// GetScanInterval returns the configured scan interval.
func (d *VacuumDetector) GetScanInterval() time.Duration {
	return d.scanInterval
}
// ConfigureFromPolicy applies vacuum-related settings from a maintenance
// policy, provided the policy exposes the getters this detector understands.
// Unsupported policy types are logged and ignored.
func (d *VacuumDetector) ConfigureFromPolicy(policy interface{}) {
	type vacuumPolicy interface {
		GetVacuumEnabled() bool
		GetVacuumGarbageRatio() float64
	}

	p, ok := policy.(vacuumPolicy)
	if !ok {
		glog.V(1).Infof("Could not configure vacuum detector from policy: unsupported policy type")
		return
	}
	d.SetEnabled(p.GetVacuumEnabled())
	d.SetGarbageThreshold(p.GetVacuumGarbageRatio())
}

View File

@@ -0,0 +1,81 @@
package vacuum
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// Factory creates vacuum task instances
type Factory struct {
	*tasks.BaseTaskFactory // supplies task type, capabilities, and description
}
// NewFactory builds a vacuum task factory registered under the vacuum task
// type, advertising its capabilities and a human-readable description.
func NewFactory() *Factory {
	base := tasks.NewBaseTaskFactory(
		types.TaskTypeVacuum,
		[]string{"vacuum", "storage"},
		"Vacuum operation to reclaim disk space by removing deleted files",
	)
	return &Factory{BaseTaskFactory: base}
}
// Create builds a new vacuum task from the given parameters.
// Parameter validation is delegated to Task.Validate so the factory and the
// task agree on one set of rules (previously the same checks were duplicated
// here verbatim); checks and error messages are unchanged.
func (f *Factory) Create(params types.TaskParams) (types.TaskInterface, error) {
	task := NewTask(params.Server, params.VolumeID)
	if err := task.Validate(params); err != nil {
		return nil, err
	}
	// Seed the estimated duration so schedulers can plan around it.
	task.SetEstimatedDuration(task.EstimateTime(params))
	return task, nil
}
// Shared detector and scheduler instances
// (lazily created by getSharedInstances; shared across all registrations
// so configuration changes apply everywhere)
var (
	sharedDetector  *VacuumDetector
	sharedScheduler *VacuumScheduler
)
// getSharedInstances lazily creates and returns the package-wide detector and
// scheduler pair. NOTE(review): there is no locking here — appears safe only
// because it is invoked from init-time registration; confirm before calling
// from concurrent code.
func getSharedInstances() (*VacuumDetector, *VacuumScheduler) {
	if sharedDetector == nil {
		sharedDetector = NewVacuumDetector()
	}
	if sharedScheduler == nil {
		sharedScheduler = NewVacuumScheduler()
	}
	return sharedDetector, sharedScheduler
}
// GetSharedInstances exposes the shared detector and scheduler pair to
// other packages.
func GetSharedInstances() (*VacuumDetector, *VacuumScheduler) {
	return getSharedInstances()
}
// Auto-register this task when the package is imported
func init() {
factory := NewFactory()
tasks.AutoRegister(types.TaskTypeVacuum, factory)
// Get shared instances for all registrations
detector, scheduler := getSharedInstances()
// Register with types registry
tasks.AutoRegisterTypes(func(registry *types.TaskRegistry) {
registry.RegisterTask(detector, scheduler)
})
// Register with UI registry using the same instances
tasks.AutoRegisterUI(func(uiRegistry *types.UIRegistry) {
RegisterUI(uiRegistry, detector, scheduler)
})
}

View File

@@ -0,0 +1,111 @@
package vacuum
import (
"time"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
// VacuumScheduler implements vacuum task scheduling using code instead of schemas
type VacuumScheduler struct {
	enabled       bool          // whether vacuum tasks may be scheduled at all
	maxConcurrent int           // cap on simultaneously running vacuum tasks
	minInterval   time.Duration // default wait before repeating a vacuum task
}
// Compile-time interface assertions
// (build fails if VacuumScheduler drifts from the TaskScheduler contract)
var (
	_ types.TaskScheduler = (*VacuumScheduler)(nil)
)
// NewVacuumScheduler returns a scheduler with defaults: enabled, at most
// two concurrent vacuum tasks, and a 6h minimum repeat interval.
func NewVacuumScheduler() *VacuumScheduler {
	s := &VacuumScheduler{}
	s.enabled = true
	s.maxConcurrent = 2
	s.minInterval = 6 * time.Hour
	return s
}
// GetTaskType reports the task type this scheduler manages.
func (s *VacuumScheduler) GetTaskType() types.TaskType {
	return types.TaskTypeVacuum
}
// CanScheduleNow reports whether a vacuum task may start right now, given the
// currently running tasks and the available worker pool. It requires the
// scheduler to be enabled, the concurrency cap to have headroom, and at least
// one worker with spare capacity that advertises the vacuum capability.
func (s *VacuumScheduler) CanScheduleNow(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker) bool {
	if !s.enabled {
		return false
	}

	// Count vacuum tasks already in flight and enforce the cap.
	running := 0
	for _, rt := range runningTasks {
		if rt.Type == types.TaskTypeVacuum {
			running++
		}
	}
	if running >= s.maxConcurrent {
		return false
	}

	// Look for a worker with spare capacity that supports vacuum.
	for _, w := range availableWorkers {
		if w.CurrentLoad >= w.MaxConcurrent {
			continue
		}
		for _, capability := range w.Capabilities {
			if capability == types.TaskTypeVacuum {
				return true
			}
		}
	}
	return false
}
// GetPriority returns the task's priority, upgrading it to high when the
// task parameters report a very large garbage ratio (> 0.8).
func (s *VacuumScheduler) GetPriority(task *types.Task) types.TaskPriority {
	if ratio, ok := task.Parameters["garbage_ratio"].(float64); ok && ratio > 0.8 {
		return types.TaskPriorityHigh
	}
	return task.Priority
}
// GetMaxConcurrent returns the cap on simultaneously running vacuum tasks.
func (s *VacuumScheduler) GetMaxConcurrent() int {
	return s.maxConcurrent
}

// GetDefaultRepeatInterval returns how long to wait before repeating a
// vacuum task on the same target.
func (s *VacuumScheduler) GetDefaultRepeatInterval() time.Duration {
	return s.minInterval
}

// IsEnabled reports whether this scheduler is currently active.
func (s *VacuumScheduler) IsEnabled() bool {
	return s.enabled
}
// Configuration setters

// SetEnabled toggles whether vacuum tasks may be scheduled.
func (s *VacuumScheduler) SetEnabled(enabled bool) {
	s.enabled = enabled
}

// SetMaxConcurrent sets the cap on simultaneously running vacuum tasks.
func (s *VacuumScheduler) SetMaxConcurrent(max int) {
	s.maxConcurrent = max
}

// SetMinInterval sets the minimum wait between repeated vacuum tasks.
func (s *VacuumScheduler) SetMinInterval(interval time.Duration) {
	s.minInterval = interval
}

// GetMinInterval returns the minimum wait between repeated vacuum tasks.
func (s *VacuumScheduler) GetMinInterval() time.Duration {
	return s.minInterval
}