Admin: misc improvements on admin server and workers. EC now works. (#7055)

* initial design

* added simulation as tests

* reorganized the codebase to move the simulation framework and tests into their own dedicated package

* integration test. ec worker task

* remove "enhanced" reference

* start master, volume servers, filer

Current Status
 Master: Healthy and running (port 9333)
 Filer: Healthy and running (port 8888)
 Volume Servers: All 6 servers running (ports 8080-8085)
🔄 Admin/Workers: Will start when dependencies are ready

* generate write load

* tasks are assigned

* admin start with grpc port. worker has its own working directory

* Update .gitignore

* working worker and admin. Task detection is not working yet.

* compiles, detection uses volumeSizeLimitMB from master

* compiles

* worker retries connecting to admin

* build and restart

* rendering pending tasks

* skip task ID column

* sticky worker id

* test canScheduleTaskNow

* worker reconnect to admin

* clean up logs

* worker register itself first

* worker can run ec work and report status

but:
1. one volume should not be repeatedly worked on.
2. ec shards need to be distributed and source data should be deleted.

* move ec task logic

* listing ec shards

* local copy, ec. Need to distribute.

* ec is mostly working now

* distribution of ec shards needs improvement
* need configuration to enable ec

* show ec volumes

* interval field UI component

* rename

* integration test with vacuuming

* garbage percentage threshold

* fix warning

* display ec shard sizes

* fix ec volumes list

* Update ui.go

* show default values

* ensure correct default value

* MaintenanceConfig use ConfigField

* use schema defined defaults

* config

* reduce duplication

* refactor to use BaseUIProvider

* each task register its schema

* checkECEncodingCandidate use ecDetector

* use vacuumDetector

* use volumeSizeLimitMB

* remove

remove

* remove unused

* refactor

* use new framework

* remove v2 reference

* refactor

* left menu can scroll now

* The maintenance manager was not being initialized when no data directory was configured for persistent storage.

* saving config

* Update task_config_schema_templ.go

* enable/disable tasks

* protobuf encoded task configurations

* fix system settings

* use ui component

* remove logs

* interface{} Reduction

* reduce interface{}

* reduce interface{}

* avoid from/to map

* reduce interface{}

* refactor

* keep it DRY

* added logging

* debug messages

* debug level

* debug

* show the log caller line

* use configured task policy

* log level

* handle admin heartbeat response

* Update worker.go

* fix EC rack and dc count

* Report task status to admin server

* fix task logging, simplify interface checking, use erasure_coding constants

* factor in empty volume server during task planning

* volume.list adds disk id

* track disk id also

* fix locking scheduled and manual scanning

* add active topology

* simplify task detector

* ec task completed, but shards are not showing up

* implement ec in ec_typed.go

* adjust log level

* dedup

* implementing ec copying shards and only ecx files

* use disk id when distributing ec shards

🎯 Planning: ActiveTopology creates DestinationPlan with specific TargetDisk
📦 Task Creation: maintenance_integration.go creates ECDestination with DiskId
🚀 Task Execution: EC task passes DiskId in VolumeEcShardsCopyRequest
💾 Volume Server: Receives disk_id and stores shards on specific disk (vs.store.Locations[req.DiskId])
📂 File System: EC shards and metadata land in the exact disk directory planned

* Delete original volume from all locations

* clean up existing shard locations

* local encoding and distributing

* Update docker/admin_integration/EC-TESTING-README.md

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* check volume id range

* simplify

* fix tests

* fix types

* clean up logs and tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
This commit is contained in:
Chris Lu
2025-07-30 12:38:03 -07:00
committed by GitHub
parent 64198dad83
commit 891a2fb6eb
130 changed files with 27737 additions and 4429 deletions

View File

@@ -78,6 +78,9 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
protected.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes)
protected.GET("/cluster/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails)
protected.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections)
protected.GET("/cluster/collections/:name", h.clusterHandlers.ShowCollectionDetails)
protected.GET("/cluster/ec-shards", h.clusterHandlers.ShowClusterEcShards)
protected.GET("/cluster/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails)
// Message Queue management routes
protected.GET("/mq/brokers", h.mqHandlers.ShowBrokers)
@@ -93,7 +96,8 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
protected.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig)
// API routes for AJAX calls
api := protected.Group("/api")
api := r.Group("/api")
api.Use(dash.RequireAuthAPI()) // Use API-specific auth middleware
{
api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology)
api.GET("/cluster/masters", h.clusterHandlers.GetMasters)
@@ -198,6 +202,9 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
r.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes)
r.GET("/cluster/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails)
r.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections)
r.GET("/cluster/collections/:name", h.clusterHandlers.ShowCollectionDetails)
r.GET("/cluster/ec-shards", h.clusterHandlers.ShowClusterEcShards)
r.GET("/cluster/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails)
// Message Queue management routes
r.GET("/mq/brokers", h.mqHandlers.ShowBrokers)

View File

@@ -1,6 +1,7 @@
package handlers
import (
"math"
"net/http"
"strconv"
@@ -161,6 +162,129 @@ func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) {
}
}
// ShowCollectionDetails renders the collection detail page
func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) {
	name := c.Param("name")
	if name == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Collection name is required"})
		return
	}

	// Pagination and sorting come from the query string, with defaults.
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "25"))
	sortBy := c.DefaultQuery("sort_by", "volume_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")

	// Fetch the collection's volumes and EC volumes from the admin server.
	details, err := h.adminServer.GetCollectionDetails(name, page, pageSize, sortBy, sortOrder)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get collection details: " + err.Error()})
		return
	}

	// Attach the logged-in username, falling back to "admin" when absent.
	user := c.GetString("username")
	if user == "" {
		user = "admin"
	}
	details.Username = user

	// Render the templ component inside the shared page layout.
	c.Header("Content-Type", "text/html")
	rendered := layout.Layout(c, app.CollectionDetails(*details))
	if err = rendered.Render(c.Request.Context(), c.Writer); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}
// ShowClusterEcShards handles the cluster EC shards page (individual shards view)
func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
	// Read pagination, sorting, and the optional collection filter.
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "100"))
	sortBy := c.DefaultQuery("sort_by", "volume_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")
	collection := c.DefaultQuery("collection", "")

	// Ask the admin server for the EC volume listing.
	data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Attach the logged-in username, defaulting to "admin".
	who := c.GetString("username")
	if who == "" {
		who = "admin"
	}
	data.Username = who

	// Render the EC volumes component inside the shared layout.
	c.Header("Content-Type", "text/html")
	content := app.ClusterEcVolumes(*data)
	if err = layout.Layout(c, content).Render(c.Request.Context(), c.Writer); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
}
// ShowEcVolumeDetails renders the EC volume details page
func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) {
	volumeIDStr := c.Param("id")
	if volumeIDStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"})
		return
	}

	volumeID, err := strconv.Atoi(volumeIDStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"})
		return
	}

	// Reject values outside the uint32 range before the conversion below.
	// Compare through uint64: the previous int(math.MaxUint32) form is a
	// constant-overflow compile error on 32-bit platforms where int is 32 bits.
	if volumeID < 0 || uint64(volumeID) > math.MaxUint32 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID out of range"})
		return
	}

	// Parse sorting parameters
	sortBy := c.DefaultQuery("sort_by", "shard_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")

	// Get EC volume details from the admin server
	ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()})
		return
	}

	// Set username, defaulting to "admin" when unauthenticated
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	ecVolumeDetails.Username = username

	// Render HTML template inside the shared layout
	c.Header("Content-Type", "text/html")
	ecVolumeDetailsComponent := app.EcVolumeDetails(*ecVolumeDetails)
	layoutComponent := layout.Layout(c, ecVolumeDetailsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}
// ShowClusterMasters renders the cluster masters page
func (h *ClusterHandlers) ShowClusterMasters(c *gin.Context) {
// Get cluster masters data

View File

@@ -1,16 +1,24 @@
package handlers
import (
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
"github.com/seaweedfs/seaweedfs/weed/admin/view/components"
"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
@@ -30,19 +38,31 @@ func NewMaintenanceHandlers(adminServer *dash.AdminServer) *MaintenanceHandlers
// ShowMaintenanceQueue renders the maintenance queue page with tasks,
// workers, and queue statistics.
func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) {
	data, err := h.getMaintenanceQueueData()
	if err != nil {
		glog.V(1).Infof("DEBUG ShowMaintenanceQueue: error getting data: %v", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Diagnostic traces: gate behind glog.V(1), matching the debug logging
	// convention used elsewhere in this file, instead of logging
	// unconditionally at Info level.
	glog.V(1).Infof("DEBUG ShowMaintenanceQueue: got data with %d tasks", len(data.Tasks))
	if data.Stats != nil {
		glog.V(1).Infof("DEBUG ShowMaintenanceQueue: stats = {pending: %d, running: %d, completed: %d}",
			data.Stats.PendingTasks, data.Stats.RunningTasks, data.Stats.CompletedToday)
	} else {
		glog.V(1).Infof("DEBUG ShowMaintenanceQueue: stats is nil")
	}

	// Render HTML template
	c.Header("Content-Type", "text/html")
	maintenanceComponent := app.MaintenanceQueue(data)
	layoutComponent := layout.Layout(c, maintenanceComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		glog.V(1).Infof("DEBUG ShowMaintenanceQueue: render error: %v", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
	glog.V(1).Infof("DEBUG ShowMaintenanceQueue: template rendered successfully")
}
// ShowMaintenanceWorkers displays the maintenance workers page
@@ -72,9 +92,12 @@ func (h *MaintenanceHandlers) ShowMaintenanceConfig(c *gin.Context) {
return
}
// Render HTML template
// Get the schema for dynamic form rendering
schema := maintenance.GetMaintenanceConfigSchema()
// Render HTML template using schema-driven approach
c.Header("Content-Type", "text/html")
configComponent := app.MaintenanceConfig(config)
configComponent := app.MaintenanceConfigSchema(config, schema)
layoutComponent := layout.Layout(c, configComponent)
err = layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
@@ -87,20 +110,20 @@ func (h *MaintenanceHandlers) ShowMaintenanceConfig(c *gin.Context) {
func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
taskTypeName := c.Param("taskType")
// Get the task type
taskType := maintenance.GetMaintenanceTaskType(taskTypeName)
if taskType == "" {
c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found"})
// Get the schema for this task type
schema := tasks.GetTaskConfigSchema(taskTypeName)
if schema == nil {
c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found or no schema available"})
return
}
// Get the UI provider for this task type
// Get the UI provider for current configuration
uiRegistry := tasks.GetGlobalUIRegistry()
typesRegistry := tasks.GetGlobalTypesRegistry()
var provider types.TaskUIProvider
for workerTaskType := range typesRegistry.GetAllDetectors() {
if string(workerTaskType) == string(taskType) {
if string(workerTaskType) == taskTypeName {
provider = uiRegistry.GetProvider(workerTaskType)
break
}
@@ -111,73 +134,23 @@ func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
return
}
// Try to get templ UI provider first - temporarily disabled
// templUIProvider := getTemplUIProvider(taskType)
var configSections []components.ConfigSectionData
// Get current configuration
currentConfig := provider.GetCurrentConfig()
// Temporarily disabled templ UI provider
// if templUIProvider != nil {
// // Use the new templ-based UI provider
// currentConfig := templUIProvider.GetCurrentConfig()
// sections, err := templUIProvider.RenderConfigSections(currentConfig)
// if err != nil {
// c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render configuration sections: " + err.Error()})
// return
// }
// configSections = sections
// } else {
// Fallback to basic configuration for providers that haven't been migrated yet
configSections = []components.ConfigSectionData{
{
Title: "Configuration Settings",
Icon: "fas fa-cogs",
Description: "Configure task detection and scheduling parameters",
Fields: []interface{}{
components.CheckboxFieldData{
FormFieldData: components.FormFieldData{
Name: "enabled",
Label: "Enable Task",
Description: "Whether this task type should be enabled",
},
Checked: true,
},
components.NumberFieldData{
FormFieldData: components.FormFieldData{
Name: "max_concurrent",
Label: "Max Concurrent Tasks",
Description: "Maximum number of concurrent tasks",
Required: true,
},
Value: 2,
Step: "1",
Min: floatPtr(1),
},
components.DurationFieldData{
FormFieldData: components.FormFieldData{
Name: "scan_interval",
Label: "Scan Interval",
Description: "How often to scan for tasks",
Required: true,
},
Value: "30m",
},
},
},
}
// } // End of disabled templ UI provider else block
// Note: Do NOT apply schema defaults to current config as it overrides saved values
// Only apply defaults when creating new configs, not when displaying existing ones
// Create task configuration data using templ components
configData := &app.TaskConfigTemplData{
TaskType: taskType,
TaskName: provider.GetDisplayName(),
TaskIcon: provider.GetIcon(),
Description: provider.GetDescription(),
ConfigSections: configSections,
// Create task configuration data
configData := &maintenance.TaskConfigData{
TaskType: maintenance.MaintenanceTaskType(taskTypeName),
TaskName: schema.DisplayName,
TaskIcon: schema.Icon,
Description: schema.Description,
}
// Render HTML template using templ components
// Render HTML template using schema-based approach
c.Header("Content-Type", "text/html")
taskConfigComponent := app.TaskConfigTempl(configData)
taskConfigComponent := app.TaskConfigSchema(configData, schema, currentConfig)
layoutComponent := layout.Layout(c, taskConfigComponent)
err := layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
@@ -186,19 +159,10 @@ func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) {
}
}
// UpdateTaskConfig updates configuration for a specific task type
// UpdateTaskConfig updates task configuration from form
func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
taskTypeName := c.Param("taskType")
// Get the task type
taskType := maintenance.GetMaintenanceTaskType(taskTypeName)
if taskType == "" {
c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found"})
return
}
// Try to get templ UI provider first - temporarily disabled
// templUIProvider := getTemplUIProvider(taskType)
taskType := types.TaskType(taskTypeName)
// Parse form data
err := c.Request.ParseForm()
@@ -207,31 +171,100 @@ func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
return
}
// Convert form data to map
formData := make(map[string][]string)
// Debug logging - show received form data
glog.V(1).Infof("Received form data for task type %s:", taskTypeName)
for key, values := range c.Request.PostForm {
formData[key] = values
glog.V(1).Infof(" %s: %v", key, values)
}
var config interface{}
// Get the task configuration schema
schema := tasks.GetTaskConfigSchema(taskTypeName)
if schema == nil {
c.JSON(http.StatusNotFound, gin.H{"error": "Schema not found for task type: " + taskTypeName})
return
}
// Temporarily disabled templ UI provider
// if templUIProvider != nil {
// // Use the new templ-based UI provider
// config, err = templUIProvider.ParseConfigForm(formData)
// if err != nil {
// c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
// return
// }
// // Apply configuration using templ provider
// err = templUIProvider.ApplyConfig(config)
// if err != nil {
// c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()})
// return
// }
// } else {
// Fallback to old UI provider for tasks that haven't been migrated yet
// Fallback to old UI provider for tasks that haven't been migrated yet
// Create a new config instance based on task type and apply schema defaults
var config TaskConfig
switch taskType {
case types.TaskTypeVacuum:
config = &vacuum.Config{}
case types.TaskTypeBalance:
config = &balance.Config{}
case types.TaskTypeErasureCoding:
config = &erasure_coding.Config{}
default:
c.JSON(http.StatusBadRequest, gin.H{"error": "Unsupported task type: " + taskTypeName})
return
}
// Apply schema defaults first using type-safe method
if err := schema.ApplyDefaultsToConfig(config); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply defaults: " + err.Error()})
return
}
// First, get the current configuration to preserve existing values
currentUIRegistry := tasks.GetGlobalUIRegistry()
currentTypesRegistry := tasks.GetGlobalTypesRegistry()
var currentProvider types.TaskUIProvider
for workerTaskType := range currentTypesRegistry.GetAllDetectors() {
if string(workerTaskType) == string(taskType) {
currentProvider = currentUIRegistry.GetProvider(workerTaskType)
break
}
}
if currentProvider != nil {
// Copy current config values to the new config
currentConfig := currentProvider.GetCurrentConfig()
if currentConfigProtobuf, ok := currentConfig.(TaskConfig); ok {
// Apply current values using protobuf directly - no map conversion needed!
currentPolicy := currentConfigProtobuf.ToTaskPolicy()
if err := config.FromTaskPolicy(currentPolicy); err != nil {
glog.Warningf("Failed to load current config for %s: %v", taskTypeName, err)
}
}
}
// Parse form data using schema-based approach (this will override with new values)
err = h.parseTaskConfigFromForm(c.Request.PostForm, schema, config)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
return
}
// Debug logging - show parsed config values
switch taskType {
case types.TaskTypeVacuum:
if vacuumConfig, ok := config.(*vacuum.Config); ok {
glog.V(1).Infof("Parsed vacuum config - GarbageThreshold: %f, MinVolumeAgeSeconds: %d, MinIntervalSeconds: %d",
vacuumConfig.GarbageThreshold, vacuumConfig.MinVolumeAgeSeconds, vacuumConfig.MinIntervalSeconds)
}
case types.TaskTypeErasureCoding:
if ecConfig, ok := config.(*erasure_coding.Config); ok {
glog.V(1).Infof("Parsed EC config - FullnessRatio: %f, QuietForSeconds: %d, MinSizeMB: %d, CollectionFilter: '%s'",
ecConfig.FullnessRatio, ecConfig.QuietForSeconds, ecConfig.MinSizeMB, ecConfig.CollectionFilter)
}
case types.TaskTypeBalance:
if balanceConfig, ok := config.(*balance.Config); ok {
glog.V(1).Infof("Parsed balance config - Enabled: %v, MaxConcurrent: %d, ScanIntervalSeconds: %d, ImbalanceThreshold: %f, MinServerCount: %d",
balanceConfig.Enabled, balanceConfig.MaxConcurrent, balanceConfig.ScanIntervalSeconds, balanceConfig.ImbalanceThreshold, balanceConfig.MinServerCount)
}
}
// Validate the configuration
if validationErrors := schema.ValidateConfig(config); len(validationErrors) > 0 {
errorMessages := make([]string, len(validationErrors))
for i, err := range validationErrors {
errorMessages[i] = err.Error()
}
c.JSON(http.StatusBadRequest, gin.H{"error": "Configuration validation failed", "details": errorMessages})
return
}
// Apply configuration using UIProvider
uiRegistry := tasks.GetGlobalUIRegistry()
typesRegistry := tasks.GetGlobalTypesRegistry()
@@ -248,25 +281,153 @@ func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) {
return
}
// Parse configuration from form using old provider
config, err = provider.ParseConfigForm(formData)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()})
return
}
// Apply configuration using old provider
err = provider.ApplyConfig(config)
// Apply configuration using provider
err = provider.ApplyTaskConfig(config)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()})
return
}
// } // End of disabled templ UI provider else block
// Save task configuration to protobuf file using ConfigPersistence
if h.adminServer != nil && h.adminServer.GetConfigPersistence() != nil {
err = h.saveTaskConfigToProtobuf(taskType, config)
if err != nil {
glog.Warningf("Failed to save task config to protobuf file: %v", err)
// Don't fail the request, just log the warning
}
}
// Trigger a configuration reload in the maintenance manager
if h.adminServer != nil {
if manager := h.adminServer.GetMaintenanceManager(); manager != nil {
err = manager.ReloadTaskConfigurations()
if err != nil {
glog.Warningf("Failed to reload task configurations: %v", err)
} else {
glog.V(1).Infof("Successfully reloaded task configurations after updating %s", taskTypeName)
}
}
}
// Redirect back to task configuration page
c.Redirect(http.StatusSeeOther, "/maintenance/config/"+taskTypeName)
}
// parseTaskConfigFromForm parses form data using schema definitions.
//
// It walks the fields of config (a pointer to a struct), matches each field's
// JSON tag name against the schema, and fills the field from the corresponding
// form value. Embedded structs are parsed recursively so shared base-config
// fields are handled too. Fields with no schema entry are silently skipped.
func (h *MaintenanceHandlers) parseTaskConfigFromForm(formData map[string][]string, schema *tasks.TaskConfigSchema, config interface{}) error {
	configValue := reflect.ValueOf(config)
	if configValue.Kind() == reflect.Ptr {
		configValue = configValue.Elem()
	}
	if configValue.Kind() != reflect.Struct {
		return fmt.Errorf("config must be a struct or pointer to struct")
	}

	configType := configValue.Type()
	for i := 0; i < configValue.NumField(); i++ {
		field := configValue.Field(i)
		fieldType := configType.Field(i)

		// Handle embedded structs recursively
		if fieldType.Anonymous && field.Kind() == reflect.Struct {
			if err := h.parseTaskConfigFromForm(formData, schema, field.Addr().Interface()); err != nil {
				return fmt.Errorf("error parsing embedded struct %s: %w", fieldType.Name, err)
			}
			continue
		}

		// Derive the form name from the JSON tag, trimming options such as
		// ",omitempty". (strings.Cut also handles a tag like ",omitempty"
		// with an empty name, which the old Index(...) > 0 check missed.)
		// Fields explicitly excluded with `json:"-"` are skipped.
		jsonTag, _, _ := strings.Cut(fieldType.Tag.Get("json"), ",")
		if jsonTag == "" || jsonTag == "-" {
			continue
		}

		// Find corresponding schema field; fields unknown to the schema are ignored.
		schemaField := schema.GetFieldByName(jsonTag)
		if schemaField == nil {
			continue
		}

		// Parse value based on the schema-declared field type
		if err := h.parseFieldFromForm(formData, schemaField, field); err != nil {
			return fmt.Errorf("error parsing field %s: %w", schemaField.DisplayName, err)
		}
	}

	return nil
}
// parseFieldFromForm parses a single field value from form data
func (h *MaintenanceHandlers) parseFieldFromForm(formData map[string][]string, schemaField *config.Field, fieldValue reflect.Value) error {
	if !fieldValue.CanSet() {
		return nil
	}

	// first returns the first submitted value for a form key, if any.
	first := func(key string) (string, bool) {
		if vals, ok := formData[key]; ok && len(vals) > 0 {
			return vals[0], true
		}
		return "", false
	}

	switch schemaField.Type {
	case config.FieldTypeBool:
		// Checkbox semantics: the key is present only when checked.
		_, checked := formData[schemaField.JSONName]
		fieldValue.SetBool(checked)

	case config.FieldTypeInt:
		if raw, ok := first(schemaField.JSONName); ok {
			n, err := strconv.Atoi(raw)
			if err != nil {
				return fmt.Errorf("invalid integer value: %s", raw)
			}
			fieldValue.SetInt(int64(n))
		}

	case config.FieldTypeFloat:
		if raw, ok := first(schemaField.JSONName); ok {
			f, err := strconv.ParseFloat(raw, 64)
			if err != nil {
				return fmt.Errorf("invalid float value: %s", raw)
			}
			fieldValue.SetFloat(f)
		}

	case config.FieldTypeString:
		if raw, ok := first(schemaField.JSONName); ok {
			fieldValue.SetString(raw)
		}

	case config.FieldTypeInterval:
		// Interval inputs arrive as two form keys: "<name>_value" (a number)
		// and "<name>_unit" (defaulting to minutes); stored as seconds.
		if raw, ok := first(schemaField.JSONName + "_value"); ok {
			value, err := strconv.Atoi(raw)
			if err != nil {
				return fmt.Errorf("invalid interval value: %s", raw)
			}
			unit := "minutes" // default
			if u, ok := first(schemaField.JSONName + "_unit"); ok {
				unit = u
			}
			fieldValue.SetInt(int64(config.IntervalValueUnitToSeconds(value, unit)))
		}

	default:
		return fmt.Errorf("unsupported field type: %s", schemaField.Type)
	}

	return nil
}
// UpdateMaintenanceConfig updates maintenance configuration from form
func (h *MaintenanceHandlers) UpdateMaintenanceConfig(c *gin.Context) {
var config maintenance.MaintenanceConfig
@@ -302,36 +463,50 @@ func (h *MaintenanceHandlers) getMaintenanceQueueData() (*maintenance.Maintenanc
return nil, err
}
return &maintenance.MaintenanceQueueData{
data := &maintenance.MaintenanceQueueData{
Tasks: tasks,
Workers: workers,
Stats: stats,
LastUpdated: time.Now(),
}, nil
}
return data, nil
}
func (h *MaintenanceHandlers) getMaintenanceQueueStats() (*maintenance.QueueStats, error) {
// This would integrate with the maintenance queue to get real statistics
// For now, return mock data
return &maintenance.QueueStats{
PendingTasks: 5,
RunningTasks: 2,
CompletedToday: 15,
FailedToday: 1,
TotalTasks: 23,
}, nil
// Use the exported method from AdminServer
return h.adminServer.GetMaintenanceQueueStats()
}
func (h *MaintenanceHandlers) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) {
// This would integrate with the maintenance queue to get real tasks
// For now, return mock data
return []*maintenance.MaintenanceTask{}, nil
// Call the maintenance manager directly to get all tasks
if h.adminServer == nil {
return []*maintenance.MaintenanceTask{}, nil
}
manager := h.adminServer.GetMaintenanceManager()
if manager == nil {
return []*maintenance.MaintenanceTask{}, nil
}
// Get ALL tasks using empty parameters - this should match what the API returns
allTasks := manager.GetTasks("", "", 0)
return allTasks, nil
}
func (h *MaintenanceHandlers) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) {
// This would integrate with the maintenance system to get real workers
// For now, return mock data
return []*maintenance.MaintenanceWorker{}, nil
// Get workers from the admin server's maintenance manager
if h.adminServer == nil {
return []*maintenance.MaintenanceWorker{}, nil
}
if h.adminServer.GetMaintenanceManager() == nil {
return []*maintenance.MaintenanceWorker{}, nil
}
// Get workers from the maintenance manager
workers := h.adminServer.GetMaintenanceManager().GetWorkers()
return workers, nil
}
func (h *MaintenanceHandlers) getMaintenanceConfig() (*maintenance.MaintenanceConfigData, error) {
@@ -344,40 +519,25 @@ func (h *MaintenanceHandlers) updateMaintenanceConfig(config *maintenance.Mainte
return h.adminServer.UpdateMaintenanceConfigData(config)
}
// floatPtr is a helper function to create float64 pointers
func floatPtr(f float64) *float64 {
return &f
}
// saveTaskConfigToProtobuf saves task configuration to protobuf file
func (h *MaintenanceHandlers) saveTaskConfigToProtobuf(taskType types.TaskType, config TaskConfig) error {
configPersistence := h.adminServer.GetConfigPersistence()
if configPersistence == nil {
return fmt.Errorf("config persistence not available")
}
// Global templ UI registry - temporarily disabled
// var globalTemplUIRegistry *types.UITemplRegistry
// Use the new ToTaskPolicy method - much simpler and more maintainable!
taskPolicy := config.ToTaskPolicy()
// initTemplUIRegistry initializes the global templ UI registry - temporarily disabled
func initTemplUIRegistry() {
// Temporarily disabled due to missing types
// if globalTemplUIRegistry == nil {
// globalTemplUIRegistry = types.NewUITemplRegistry()
// // Register vacuum templ UI provider using shared instances
// vacuumDetector, vacuumScheduler := vacuum.GetSharedInstances()
// vacuum.RegisterUITempl(globalTemplUIRegistry, vacuumDetector, vacuumScheduler)
// // Register erasure coding templ UI provider using shared instances
// erasureCodingDetector, erasureCodingScheduler := erasure_coding.GetSharedInstances()
// erasure_coding.RegisterUITempl(globalTemplUIRegistry, erasureCodingDetector, erasureCodingScheduler)
// // Register balance templ UI provider using shared instances
// balanceDetector, balanceScheduler := balance.GetSharedInstances()
// balance.RegisterUITempl(globalTemplUIRegistry, balanceDetector, balanceScheduler)
// }
}
// getTemplUIProvider gets the templ UI provider for a task type - temporarily disabled
func getTemplUIProvider(taskType maintenance.MaintenanceTaskType) interface{} {
// initTemplUIRegistry()
// Convert maintenance task type to worker task type
// typesRegistry := tasks.GetGlobalTypesRegistry()
// for workerTaskType := range typesRegistry.GetAllDetectors() {
// if string(workerTaskType) == string(taskType) {
// return globalTemplUIRegistry.GetProvider(workerTaskType)
// }
// }
return nil
// Save using task-specific methods
switch taskType {
case types.TaskTypeVacuum:
return configPersistence.SaveVacuumTaskPolicy(taskPolicy)
case types.TaskTypeErasureCoding:
return configPersistence.SaveErasureCodingTaskPolicy(taskPolicy)
case types.TaskTypeBalance:
return configPersistence.SaveBalanceTaskPolicy(taskPolicy)
default:
return fmt.Errorf("unsupported task type for protobuf persistence: %s", taskType)
}
}

View File

@@ -0,0 +1,389 @@
package handlers
import (
"net/url"
"testing"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum"
)
// TestParseTaskConfigFromForm_WithEmbeddedStruct verifies that
// parseTaskConfigFromForm populates both the embedded BaseConfig fields
// (Enabled, ScanIntervalSeconds, MaxConcurrent) and each task type's own
// fields from simulated HTML form data. Interval fields arrive from the UI as
// separate "<name>_value"/"<name>_unit" pairs and must be combined into
// seconds; checkboxes submit "on" when checked and are absent when unchecked.
//
// Local config variables are named cfg (not "config") so they do not shadow
// the imported admin config package.
func TestParseTaskConfigFromForm_WithEmbeddedStruct(t *testing.T) {
	// A bare handlers instance suffices; parsing reads no handler state.
	h := &MaintenanceHandlers{}

	// Test with balance config
	t.Run("Balance Config", func(t *testing.T) {
		// Simulate form data
		formData := url.Values{
			"enabled":                     {"on"},      // checkbox field
			"scan_interval_seconds_value": {"30"},      // interval field
			"scan_interval_seconds_unit":  {"minutes"}, // interval unit
			"max_concurrent":              {"2"},       // number field
			"imbalance_threshold":         {"0.15"},    // float field
			"min_server_count":            {"3"},       // number field
		}

		// Get schema
		schema := tasks.GetTaskConfigSchema("balance")
		if schema == nil {
			t.Fatal("Failed to get balance schema")
		}

		// Create config instance and parse form data into it.
		cfg := &balance.Config{}
		if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
			t.Fatalf("Failed to parse form data: %v", err)
		}

		// Verify embedded struct fields were set correctly
		if !cfg.Enabled {
			t.Errorf("Expected Enabled=true, got %v", cfg.Enabled)
		}
		if cfg.ScanIntervalSeconds != 1800 { // 30 minutes * 60
			t.Errorf("Expected ScanIntervalSeconds=1800, got %v", cfg.ScanIntervalSeconds)
		}
		if cfg.MaxConcurrent != 2 {
			t.Errorf("Expected MaxConcurrent=2, got %v", cfg.MaxConcurrent)
		}

		// Verify balance-specific fields were set correctly
		if cfg.ImbalanceThreshold != 0.15 {
			t.Errorf("Expected ImbalanceThreshold=0.15, got %v", cfg.ImbalanceThreshold)
		}
		if cfg.MinServerCount != 3 {
			t.Errorf("Expected MinServerCount=3, got %v", cfg.MinServerCount)
		}
	})

	// Test with vacuum config
	t.Run("Vacuum Config", func(t *testing.T) {
		// Simulate form data
		formData := url.Values{
			// "enabled" field omitted to simulate unchecked checkbox
			"scan_interval_seconds_value":  {"4"},     // interval field
			"scan_interval_seconds_unit":   {"hours"}, // interval unit
			"max_concurrent":               {"3"},     // number field
			"garbage_threshold":            {"0.4"},   // float field
			"min_volume_age_seconds_value": {"2"},     // interval field
			"min_volume_age_seconds_unit":  {"days"},  // interval unit
			"min_interval_seconds_value":   {"1"},     // interval field
			"min_interval_seconds_unit":    {"days"},  // interval unit
		}

		// Get schema
		schema := tasks.GetTaskConfigSchema("vacuum")
		if schema == nil {
			t.Fatal("Failed to get vacuum schema")
		}

		// Create config instance and parse form data into it.
		cfg := &vacuum.Config{}
		if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
			t.Fatalf("Failed to parse form data: %v", err)
		}

		// Verify embedded struct fields were set correctly
		if cfg.Enabled {
			t.Errorf("Expected Enabled=false, got %v", cfg.Enabled)
		}
		if cfg.ScanIntervalSeconds != 14400 { // 4 hours * 3600
			t.Errorf("Expected ScanIntervalSeconds=14400, got %v", cfg.ScanIntervalSeconds)
		}
		if cfg.MaxConcurrent != 3 {
			t.Errorf("Expected MaxConcurrent=3, got %v", cfg.MaxConcurrent)
		}

		// Verify vacuum-specific fields were set correctly
		if cfg.GarbageThreshold != 0.4 {
			t.Errorf("Expected GarbageThreshold=0.4, got %v", cfg.GarbageThreshold)
		}
		if cfg.MinVolumeAgeSeconds != 172800 { // 2 days * 86400
			t.Errorf("Expected MinVolumeAgeSeconds=172800, got %v", cfg.MinVolumeAgeSeconds)
		}
		if cfg.MinIntervalSeconds != 86400 { // 1 day * 86400
			t.Errorf("Expected MinIntervalSeconds=86400, got %v", cfg.MinIntervalSeconds)
		}
	})

	// Test with erasure coding config
	t.Run("Erasure Coding Config", func(t *testing.T) {
		// Simulate form data
		formData := url.Values{
			"enabled":                     {"on"},              // checkbox field
			"scan_interval_seconds_value": {"2"},               // interval field
			"scan_interval_seconds_unit":  {"hours"},           // interval unit
			"max_concurrent":              {"1"},               // number field
			"quiet_for_seconds_value":     {"10"},              // interval field
			"quiet_for_seconds_unit":      {"minutes"},         // interval unit
			"fullness_ratio":              {"0.85"},            // float field
			"collection_filter":           {"test_collection"}, // string field
			"min_size_mb":                 {"50"},              // number field
		}

		// Get schema
		schema := tasks.GetTaskConfigSchema("erasure_coding")
		if schema == nil {
			t.Fatal("Failed to get erasure_coding schema")
		}

		// Create config instance and parse form data into it.
		cfg := &erasure_coding.Config{}
		if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
			t.Fatalf("Failed to parse form data: %v", err)
		}

		// Verify embedded struct fields were set correctly
		if !cfg.Enabled {
			t.Errorf("Expected Enabled=true, got %v", cfg.Enabled)
		}
		if cfg.ScanIntervalSeconds != 7200 { // 2 hours * 3600
			t.Errorf("Expected ScanIntervalSeconds=7200, got %v", cfg.ScanIntervalSeconds)
		}
		if cfg.MaxConcurrent != 1 {
			t.Errorf("Expected MaxConcurrent=1, got %v", cfg.MaxConcurrent)
		}

		// Verify erasure coding-specific fields were set correctly
		if cfg.QuietForSeconds != 600 { // 10 minutes * 60
			t.Errorf("Expected QuietForSeconds=600, got %v", cfg.QuietForSeconds)
		}
		if cfg.FullnessRatio != 0.85 {
			t.Errorf("Expected FullnessRatio=0.85, got %v", cfg.FullnessRatio)
		}
		if cfg.CollectionFilter != "test_collection" {
			t.Errorf("Expected CollectionFilter='test_collection', got %v", cfg.CollectionFilter)
		}
		if cfg.MinSizeMB != 50 {
			t.Errorf("Expected MinSizeMB=50, got %v", cfg.MinSizeMB)
		}
	})
}
// TestConfigurationValidation verifies that each task config struct converts
// cleanly to a protobuf TaskPolicy (with the shared Enabled/MaxConcurrent
// fields carried over) and passes its own Validate check. The original code
// dispatched on the config type twice — once for ToTaskPolicy and once for
// Validate — so both checks are now folded into a single type switch.
func TestConfigurationValidation(t *testing.T) {
	taskTypes := []struct {
		name   string
		config interface{}
	}{
		{
			"balance",
			&balance.Config{
				BaseConfig: base.BaseConfig{
					Enabled:             true,
					ScanIntervalSeconds: 2400,
					MaxConcurrent:       3,
				},
				ImbalanceThreshold: 0.18,
				MinServerCount:     4,
			},
		},
		{
			"vacuum",
			&vacuum.Config{
				BaseConfig: base.BaseConfig{
					Enabled:             false,
					ScanIntervalSeconds: 7200,
					MaxConcurrent:       2,
				},
				GarbageThreshold:    0.35,
				MinVolumeAgeSeconds: 86400,
				MinIntervalSeconds:  604800,
			},
		},
		{
			"erasure_coding",
			&erasure_coding.Config{
				BaseConfig: base.BaseConfig{
					Enabled:             true,
					ScanIntervalSeconds: 3600,
					MaxConcurrent:       1,
				},
				QuietForSeconds:  900,
				FullnessRatio:    0.9,
				CollectionFilter: "important",
				MinSizeMB:        100,
			},
		},
	}

	for _, test := range taskTypes {
		t.Run(test.name, func(t *testing.T) {
			// Single dispatch: check protobuf conversion, then validation.
			switch cfg := test.config.(type) {
			case *balance.Config:
				policy := cfg.ToTaskPolicy()
				if policy == nil {
					t.Fatal("ToTaskPolicy returned nil")
				}
				if policy.Enabled != cfg.Enabled {
					t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
				}
				if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
					t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
				}
				if err := cfg.Validate(); err != nil {
					t.Errorf("Validation failed: %v", err)
				}
			case *vacuum.Config:
				policy := cfg.ToTaskPolicy()
				if policy == nil {
					t.Fatal("ToTaskPolicy returned nil")
				}
				if policy.Enabled != cfg.Enabled {
					t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
				}
				if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
					t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
				}
				if err := cfg.Validate(); err != nil {
					t.Errorf("Validation failed: %v", err)
				}
			case *erasure_coding.Config:
				policy := cfg.ToTaskPolicy()
				if policy == nil {
					t.Fatal("ToTaskPolicy returned nil")
				}
				if policy.Enabled != cfg.Enabled {
					t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled)
				}
				if policy.MaxConcurrent != int32(cfg.MaxConcurrent) {
					t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent)
				}
				if err := cfg.Validate(); err != nil {
					t.Errorf("Validation failed: %v", err)
				}
			default:
				t.Fatalf("Unknown config type: %T", test.config)
			}
		})
	}
}
// TestParseFieldFromForm_EdgeCases exercises parseTaskConfigFromForm on
// individual field kinds: checkbox booleans (present => true, absent => false,
// present-but-empty => true) and interval fields whose value/unit pairs are
// converted to seconds.
//
// The local result variable is named cfg (not "config"): the original code
// shadowed the imported config package, which this function also uses for
// config.Schema/config.Field. The schema literals are loop-invariant and are
// built once per subtest group instead of once per case.
func TestParseFieldFromForm_EdgeCases(t *testing.T) {
	h := &MaintenanceHandlers{}

	// Test checkbox parsing (boolean fields)
	t.Run("Checkbox Fields", func(t *testing.T) {
		schema := &tasks.TaskConfigSchema{
			Schema: config.Schema{
				Fields: []*config.Field{
					{
						JSONName:  "test_field",
						Type:      config.FieldTypeBool,
						InputType: "checkbox",
					},
				},
			},
		}

		type TestConfig struct {
			TestField bool `json:"test_field"`
		}

		tests := []struct {
			name          string
			formData      url.Values
			expectedValue bool
		}{
			{"Checked checkbox", url.Values{"test_field": {"on"}}, true},
			{"Unchecked checkbox", url.Values{}, false},
			{"Empty value checkbox", url.Values{"test_field": {""}}, true}, // Present but empty means checked
		}
		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				cfg := &TestConfig{}
				if err := h.parseTaskConfigFromForm(test.formData, schema, cfg); err != nil {
					t.Fatalf("parseTaskConfigFromForm failed: %v", err)
				}
				if cfg.TestField != test.expectedValue {
					t.Errorf("Expected %v, got %v", test.expectedValue, cfg.TestField)
				}
			})
		}
	})

	// Test interval parsing
	t.Run("Interval Fields", func(t *testing.T) {
		schema := &tasks.TaskConfigSchema{
			Schema: config.Schema{
				Fields: []*config.Field{
					{
						JSONName:  "test_field",
						Type:      config.FieldTypeInterval,
						InputType: "interval",
					},
				},
			},
		}

		type TestConfig struct {
			TestField int `json:"test_field"`
		}

		tests := []struct {
			name         string
			value        string
			unit         string
			expectedSecs int
		}{
			{"Minutes", "30", "minutes", 1800},
			{"Hours", "2", "hours", 7200},
			{"Days", "1", "days", 86400},
		}
		for _, test := range tests {
			t.Run(test.name, func(t *testing.T) {
				formData := url.Values{
					"test_field_value": {test.value},
					"test_field_unit":  {test.unit},
				}
				cfg := &TestConfig{}
				if err := h.parseTaskConfigFromForm(formData, schema, cfg); err != nil {
					t.Fatalf("parseTaskConfigFromForm failed: %v", err)
				}
				if cfg.TestField != test.expectedSecs {
					t.Errorf("Expected %d seconds, got %d", test.expectedSecs, cfg.TestField)
				}
			})
		}
	})
}

View File

@@ -0,0 +1,25 @@
package handlers
import (
"github.com/seaweedfs/seaweedfs/weed/admin/config"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)
// TaskConfig defines the interface that all task configuration types must implement.
// It extends config.ConfigWithDefaults with enable/disable accessors and protobuf
// round-tripping, so task policies are persisted as typed worker_pb.TaskPolicy
// messages rather than untyped maps.
type TaskConfig interface {
	config.ConfigWithDefaults // Extends ConfigWithDefaults for type-safe schema operations

	// Common methods from BaseConfig.
	IsEnabled() bool
	SetEnabled(enabled bool)

	// Protobuf serialization methods - no more map[string]interface{}!
	ToTaskPolicy() *worker_pb.TaskPolicy
	FromTaskPolicy(policy *worker_pb.TaskPolicy) error
}
// TaskConfigProvider defines the interface for creating specific task config types.
type TaskConfigProvider interface {
	// NewConfig returns a fresh TaskConfig instance for this provider's task type.
	NewConfig() TaskConfig
	// GetTaskType returns the task type identifier string for this provider.
	GetTaskType() string
}