chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
This commit is contained in:
Chris Lu
2026-04-03 16:04:27 -07:00
committed by GitHub
parent 8fad85aed7
commit 995dfc4d5d
264 changed files with 62 additions and 46027 deletions

View File

@@ -1,9 +1,7 @@
package worker
import (
"fmt"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
@@ -15,334 +13,6 @@ type Registry struct {
mutex sync.RWMutex
}
// NewRegistry creates a new worker registry with an empty worker map and
// zeroed statistics; the stats StartTime records the creation moment.
func NewRegistry() *Registry {
	stats := &types.RegistryStats{
		StartTime: time.Now(),
	}
	return &Registry{
		workers: make(map[string]*types.WorkerData),
		stats:   stats,
	}
}
// RegisterWorker adds a worker to the registry, refusing duplicate IDs.
// Registry statistics are refreshed on success.
func (r *Registry) RegisterWorker(worker *types.WorkerData) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if _, dup := r.workers[worker.ID]; dup {
		return fmt.Errorf("worker %s already registered", worker.ID)
	}
	r.workers[worker.ID] = worker
	r.updateStats()
	return nil
}
// UnregisterWorker removes a worker from the registry by ID; it is an
// error if no such worker is registered.
func (r *Registry) UnregisterWorker(workerID string) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if _, ok := r.workers[workerID]; !ok {
		return fmt.Errorf("worker %s not found", workerID)
	}
	delete(r.workers, workerID)
	r.updateStats()
	return nil
}
// GetWorker looks up a worker by ID; the boolean reports whether it exists.
func (r *Registry) GetWorker(workerID string) (*types.WorkerData, bool) {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	w, ok := r.workers[workerID]
	return w, ok
}
// ListWorkers returns a freshly-allocated slice of all registered workers
// (the pointers are shared; the slice itself is the caller's).
func (r *Registry) ListWorkers() []*types.WorkerData {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	all := make([]*types.WorkerData, 0, len(r.workers))
	for _, w := range r.workers {
		all = append(all, w)
	}
	return all
}
// GetWorkersByCapability returns every worker whose capability list
// contains the given task type.
func (r *Registry) GetWorkersByCapability(capability types.TaskType) []*types.WorkerData {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	var matched []*types.WorkerData
	for _, w := range r.workers {
		for _, c := range w.Capabilities {
			if c != capability {
				continue
			}
			matched = append(matched, w)
			break
		}
	}
	return matched
}
// GetAvailableWorkers returns workers that can accept more work: status
// "active" and current load strictly below their concurrency limit.
func (r *Registry) GetAvailableWorkers() []*types.WorkerData {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	var available []*types.WorkerData
	for _, w := range r.workers {
		if w.Status != "active" {
			continue
		}
		if w.CurrentLoad < w.MaxConcurrent {
			available = append(available, w)
		}
	}
	return available
}
// GetBestWorkerForTask picks the most suitable worker for a task type:
// among active workers that support the type and have spare capacity, the
// one with the highest free-capacity fraction wins. Returns nil when no
// worker qualifies.
func (r *Registry) GetBestWorkerForTask(taskType types.TaskType) *types.WorkerData {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	var (
		best *types.WorkerData
		top  float64
	)
	for _, w := range r.workers {
		supported := false
		for _, c := range w.Capabilities {
			if c == taskType {
				supported = true
				break
			}
		}
		// Skip workers that can't take the task or are saturated/inactive.
		if !supported || w.Status != "active" || w.CurrentLoad >= w.MaxConcurrent {
			continue
		}
		// Score = fraction of unused capacity; more headroom is better.
		score := float64(w.MaxConcurrent-w.CurrentLoad) / float64(w.MaxConcurrent)
		if best == nil || score > top {
			best, top = w, score
		}
	}
	return best
}
// UpdateWorkerHeartbeat stamps the worker's LastHeartbeat with the current
// time; it is an error if the worker is unknown.
func (r *Registry) UpdateWorkerHeartbeat(workerID string) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	w, ok := r.workers[workerID]
	if !ok {
		return fmt.Errorf("worker %s not found", workerID)
	}
	w.LastHeartbeat = time.Now()
	return nil
}
// UpdateWorkerLoad records the worker's current load and derives its
// status from it ("busy" at or above capacity, "active" otherwise).
func (r *Registry) UpdateWorkerLoad(workerID string, load int) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	w, ok := r.workers[workerID]
	if !ok {
		return fmt.Errorf("worker %s not found", workerID)
	}
	w.CurrentLoad = load
	// Status mirrors saturation so schedulers can filter cheaply.
	if load >= w.MaxConcurrent {
		w.Status = "busy"
	} else {
		w.Status = "active"
	}
	r.updateStats()
	return nil
}
// UpdateWorkerStatus overwrites a worker's status string and refreshes the
// registry statistics; unknown workers yield an error.
func (r *Registry) UpdateWorkerStatus(workerID string, status string) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	w, ok := r.workers[workerID]
	if !ok {
		return fmt.Errorf("worker %s not found", workerID)
	}
	w.Status = status
	r.updateStats()
	return nil
}
// CleanupStaleWorkers drops every worker whose last heartbeat is older
// than timeout and returns how many were removed. Stats are refreshed only
// when something was actually deleted.
func (r *Registry) CleanupStaleWorkers(timeout time.Duration) int {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	deadline := time.Now().Add(-timeout)
	removed := 0
	for id, w := range r.workers {
		if w.LastHeartbeat.Before(deadline) {
			delete(r.workers, id)
			removed++
		}
	}
	if removed > 0 {
		r.updateStats()
	}
	return removed
}
// GetStats returns a snapshot of the registry statistics. A shallow copy
// is handed out so callers never share the live struct with updateStats.
func (r *Registry) GetStats() *types.RegistryStats {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	snapshot := *r.stats
	return &snapshot
}
// updateStats recomputes the aggregate counters from the worker map.
// Callers must already hold the write lock.
func (r *Registry) updateStats() {
	active, busy, idle := 0, 0, 0
	for _, w := range r.workers {
		switch w.Status {
		case "active":
			// An "active" worker with no load counts as idle.
			if w.CurrentLoad > 0 {
				active++
			} else {
				idle++
			}
		case "busy":
			busy++
		}
	}
	r.stats.TotalWorkers = len(r.workers)
	r.stats.ActiveWorkers = active
	r.stats.BusyWorkers = busy
	r.stats.IdleWorkers = idle
	r.stats.Uptime = time.Since(r.stats.StartTime)
	r.stats.LastUpdated = time.Now()
}
// GetTaskCapabilities returns the deduplicated union of all capabilities
// across registered workers (order is unspecified).
func (r *Registry) GetTaskCapabilities() []types.TaskType {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	seen := make(map[types.TaskType]bool)
	for _, w := range r.workers {
		for _, c := range w.Capabilities {
			seen[c] = true
		}
	}
	var caps []types.TaskType
	for c := range seen {
		caps = append(caps, c)
	}
	return caps
}
// GetWorkersByStatus returns the workers whose status string equals the
// given status exactly.
func (r *Registry) GetWorkersByStatus(status string) []*types.WorkerData {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	var matched []*types.WorkerData
	for _, w := range r.workers {
		if w.Status != status {
			continue
		}
		matched = append(matched, w)
	}
	return matched
}
// GetWorkerCount reports how many workers are currently registered.
func (r *Registry) GetWorkerCount() int {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	return len(r.workers)
}
// GetWorkerIDs returns the IDs of all registered workers (unordered).
func (r *Registry) GetWorkerIDs() []string {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	ids := make([]string, 0, len(r.workers))
	for workerID := range r.workers {
		ids = append(ids, workerID)
	}
	return ids
}
// GetWorkerSummary aggregates per-status and per-capability counts plus
// total load and capacity across all workers.
func (r *Registry) GetWorkerSummary() *types.WorkerSummary {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	summary := &types.WorkerSummary{
		TotalWorkers: len(r.workers),
		ByStatus:     make(map[string]int),
		ByCapability: make(map[types.TaskType]int),
	}
	for _, w := range r.workers {
		summary.ByStatus[w.Status]++
		summary.TotalLoad += w.CurrentLoad
		summary.MaxCapacity += w.MaxConcurrent
		for _, c := range w.Capabilities {
			summary.ByCapability[c]++
		}
	}
	return summary
}
// Default global registry instance, created lazily on first use.
var (
	defaultRegistry *Registry
	registryOnce    sync.Once
)

// GetDefaultRegistry returns the process-wide shared registry, creating it
// exactly once on the first call.
func GetDefaultRegistry() *Registry {
	registryOnce.Do(func() {
		defaultRegistry = NewRegistry()
	})
	return defaultRegistry
}

View File

@@ -1,138 +0,0 @@
package balance
import (
"sync"
"time"
)
// BalanceMetrics contains balance-specific monitoring data.
// All fields are guarded by mutex; read and write them only through the
// accessor methods, never directly from other goroutines.
type BalanceMetrics struct {
	// Execution metrics
	VolumesBalanced      int64     `json:"volumes_balanced"`
	TotalDataTransferred int64     `json:"total_data_transferred"` // bytes moved across all operations
	AverageImbalance     float64   `json:"average_imbalance"`      // exponential moving average of imbalance scores
	LastBalanceTime      time.Time `json:"last_balance_time"`

	// Performance metrics
	AverageTransferSpeed float64 `json:"average_transfer_speed_mbps"`
	TotalExecutionTime   int64   `json:"total_execution_time_seconds"`
	SuccessfulOperations int64   `json:"successful_operations"`
	FailedOperations     int64   `json:"failed_operations"`

	// Current task metrics
	CurrentImbalanceScore float64 `json:"current_imbalance_score"`
	PlannedDestinations   int     `json:"planned_destinations"`

	mutex sync.RWMutex
}
// NewBalanceMetrics returns a metrics instance with all counters at zero
// and LastBalanceTime initialized to the current time.
func NewBalanceMetrics() *BalanceMetrics {
	m := &BalanceMetrics{}
	m.LastBalanceTime = time.Now()
	return m
}
// RecordVolumeBalanced records one successful volume move: it bumps the
// success counters, accumulates bytes and elapsed seconds, and folds the
// observed throughput into the moving-average transfer speed.
func (m *BalanceMetrics) RecordVolumeBalanced(volumeSize int64, transferTime time.Duration) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.VolumesBalanced++
	m.SuccessfulOperations++
	m.TotalDataTransferred += volumeSize
	m.TotalExecutionTime += int64(transferTime.Seconds())
	m.LastBalanceTime = time.Now()
	if transferTime <= 0 {
		return
	}
	// Throughput in MB/s for this transfer, blended with an EMA
	// (80% history, 20% new sample).
	mbps := float64(volumeSize) / (1024 * 1024) / transferTime.Seconds()
	if m.AverageTransferSpeed == 0 {
		m.AverageTransferSpeed = mbps
	} else {
		m.AverageTransferSpeed = 0.8*m.AverageTransferSpeed + 0.2*mbps
	}
}
// RecordFailure increments the failed-operation counter.
func (m *BalanceMetrics) RecordFailure() {
	m.mutex.Lock()
	m.FailedOperations++
	m.mutex.Unlock()
}
// UpdateImbalanceScore stores the latest cluster imbalance score and folds
// it into the running average (90% history, 10% new sample).
func (m *BalanceMetrics) UpdateImbalanceScore(score float64) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.CurrentImbalanceScore = score
	if m.AverageImbalance == 0 {
		// First sample seeds the average directly.
		m.AverageImbalance = score
	} else {
		m.AverageImbalance = 0.9*m.AverageImbalance + 0.1*score
	}
}
// SetPlannedDestinations records how many destinations are planned for the
// current balancing pass.
func (m *BalanceMetrics) SetPlannedDestinations(count int) {
	m.mutex.Lock()
	m.PlannedDestinations = count
	m.mutex.Unlock()
}
// GetMetrics returns a value copy of the current metrics. The copy is
// built field by field so the embedded mutex is never copied (copying a
// locked mutex is a vet error).
func (m *BalanceMetrics) GetMetrics() BalanceMetrics {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	snapshot := BalanceMetrics{
		VolumesBalanced:       m.VolumesBalanced,
		TotalDataTransferred:  m.TotalDataTransferred,
		AverageImbalance:      m.AverageImbalance,
		LastBalanceTime:       m.LastBalanceTime,
		AverageTransferSpeed:  m.AverageTransferSpeed,
		TotalExecutionTime:    m.TotalExecutionTime,
		SuccessfulOperations:  m.SuccessfulOperations,
		FailedOperations:      m.FailedOperations,
		CurrentImbalanceScore: m.CurrentImbalanceScore,
		PlannedDestinations:   m.PlannedDestinations,
	}
	return snapshot
}
// GetSuccessRate returns the percentage of successful operations; with no
// operations recorded yet it optimistically reports 100%.
func (m *BalanceMetrics) GetSuccessRate() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	attempts := m.SuccessfulOperations + m.FailedOperations
	if attempts == 0 {
		return 100.0
	}
	return 100.0 * float64(m.SuccessfulOperations) / float64(attempts)
}
// Reset resets all metrics to zero and restarts LastBalanceTime.
//
// Fields are cleared individually rather than via a struct assignment:
// `*m = BalanceMetrics{...}` would overwrite the embedded (and currently
// locked) mutex with an unlocked zero value, so the deferred Unlock would
// abort the process with "sync: unlock of unlocked mutex" (go vet's
// copylocks check flags exactly this pattern).
func (m *BalanceMetrics) Reset() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.VolumesBalanced = 0
	m.TotalDataTransferred = 0
	m.AverageImbalance = 0
	m.LastBalanceTime = time.Now()
	m.AverageTransferSpeed = 0
	m.TotalExecutionTime = 0
	m.SuccessfulOperations = 0
	m.FailedOperations = 0
	m.CurrentImbalanceScore = 0
	m.PlannedDestinations = 0
}
// Global metrics instance for balance tasks.
var globalBalanceMetrics = NewBalanceMetrics()

// GetGlobalBalanceMetrics exposes the shared, package-level balance
// metrics instance.
func GetGlobalBalanceMetrics() *BalanceMetrics {
	return globalBalanceMetrics
}

View File

@@ -74,26 +74,6 @@ type GenericUIProvider struct {
taskDef *TaskDefinition
}
// GetTaskType reports the task type declared by the wrapped definition.
func (ui *GenericUIProvider) GetTaskType() types.TaskType {
	def := ui.taskDef
	return def.Type
}
// GetDisplayName reports the human-readable name from the wrapped definition.
func (ui *GenericUIProvider) GetDisplayName() string {
	def := ui.taskDef
	return def.DisplayName
}
// GetDescription reports the task description from the wrapped definition.
func (ui *GenericUIProvider) GetDescription() string {
	def := ui.taskDef
	return def.Description
}
// GetIcon reports the icon CSS class from the wrapped definition.
func (ui *GenericUIProvider) GetIcon() string {
	def := ui.taskDef
	return def.Icon
}
// GetCurrentConfig returns current config as TaskConfig
func (ui *GenericUIProvider) GetCurrentConfig() types.TaskConfig {
return ui.taskDef.Config

View File

@@ -2,8 +2,6 @@ package base
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
@@ -75,108 +73,6 @@ func (c *BaseConfig) Validate() error {
return nil
}
// StructToMap converts any struct to a map using reflection
func StructToMap(obj interface{}) map[string]interface{} {
result := make(map[string]interface{})
val := reflect.ValueOf(obj)
// Handle pointer to struct
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
if val.Kind() != reflect.Struct {
return result
}
typ := val.Type()
for i := 0; i < val.NumField(); i++ {
field := val.Field(i)
fieldType := typ.Field(i)
// Skip unexported fields
if !field.CanInterface() {
continue
}
// Handle embedded structs recursively (before JSON tag check)
if field.Kind() == reflect.Struct && fieldType.Anonymous {
embeddedMap := StructToMap(field.Interface())
for k, v := range embeddedMap {
result[k] = v
}
continue
}
// Get JSON tag name
jsonTag := fieldType.Tag.Get("json")
if jsonTag == "" || jsonTag == "-" {
continue
}
// Remove options like ",omitempty"
if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 {
jsonTag = jsonTag[:commaIdx]
}
result[jsonTag] = field.Interface()
}
return result
}
// MapToStruct populates obj (which must be a pointer to struct) from data,
// matching map keys to JSON tag names. Anonymous embedded structs are
// filled recursively with the same map; keys absent from data leave the
// corresponding fields untouched.
func MapToStruct(data map[string]interface{}, obj interface{}) error {
	v := reflect.ValueOf(obj)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
		return fmt.Errorf("obj must be pointer to struct")
	}
	v = v.Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ft := t.Field(i)
		if !fv.CanSet() {
			continue // unexported field
		}
		// Recurse into anonymous embedded structs before tag filtering so
		// their flattened keys are honored.
		if ft.Anonymous && fv.Kind() == reflect.Struct {
			if err := MapToStruct(data, fv.Addr().Interface()); err != nil {
				return err
			}
			continue
		}
		tag := ft.Tag.Get("json")
		if tag == "" || tag == "-" {
			continue
		}
		// Drop tag options such as ",omitempty".
		if idx := strings.Index(tag, ","); idx >= 0 {
			tag = tag[:idx]
		}
		val, present := data[tag]
		if !present {
			continue
		}
		if err := setFieldValue(fv, val); err != nil {
			return fmt.Errorf("failed to set field %s: %v", tag, err)
		}
	}
	return nil
}
// ToMap converts config to map using reflection
// ToTaskPolicy converts BaseConfig to protobuf (partial implementation)
// Note: Concrete implementations should override this to include task-specific config
@@ -207,66 +103,3 @@ func (c *BaseConfig) ApplySchemaDefaults(schema *config.Schema) error {
// Use reflection-based approach for BaseConfig since it needs to handle embedded structs
return schema.ApplyDefaultsToProtobuf(c)
}
// setFieldValue sets a field value with type conversion
func setFieldValue(field reflect.Value, value interface{}) error {
if value == nil {
return nil
}
valueVal := reflect.ValueOf(value)
fieldType := field.Type()
valueType := valueVal.Type()
// Direct assignment if types match
if valueType.AssignableTo(fieldType) {
field.Set(valueVal)
return nil
}
// Type conversion for common cases
switch fieldType.Kind() {
case reflect.Bool:
if b, ok := value.(bool); ok {
field.SetBool(b)
} else {
return fmt.Errorf("cannot convert %T to bool", value)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v := value.(type) {
case int:
field.SetInt(int64(v))
case int32:
field.SetInt(int64(v))
case int64:
field.SetInt(v)
case float64:
field.SetInt(int64(v))
default:
return fmt.Errorf("cannot convert %T to int", value)
}
case reflect.Float32, reflect.Float64:
switch v := value.(type) {
case float32:
field.SetFloat(float64(v))
case float64:
field.SetFloat(v)
case int:
field.SetFloat(float64(v))
case int64:
field.SetFloat(float64(v))
default:
return fmt.Errorf("cannot convert %T to float", value)
}
case reflect.String:
if s, ok := value.(string); ok {
field.SetString(s)
} else {
return fmt.Errorf("cannot convert %T to string", value)
}
default:
return fmt.Errorf("unsupported field type %s", fieldType.Kind())
}
return nil
}

View File

@@ -1,338 +0,0 @@
package base
import (
"reflect"
"testing"
)
// Test structs that mirror the actual configuration structure.

// TestBaseConfig plays the role of the shared base config that real task
// configs embed anonymously.
type TestBaseConfig struct {
	Enabled             bool `json:"enabled"`
	ScanIntervalSeconds int  `json:"scan_interval_seconds"`
	MaxConcurrent       int  `json:"max_concurrent"`
}

// TestTaskConfig embeds TestBaseConfig anonymously, exercising the
// embedded-struct flattening path of StructToMap/MapToStruct.
type TestTaskConfig struct {
	TestBaseConfig
	TaskSpecificField    float64 `json:"task_specific_field"`
	AnotherSpecificField string  `json:"another_specific_field"`
}

// TestNestedConfig adds a named (non-anonymous) nested struct, which must
// NOT be flattened — it is emitted as a single map entry.
type TestNestedConfig struct {
	TestBaseConfig
	NestedStruct struct {
		NestedField string `json:"nested_field"`
	} `json:"nested_struct"`
	TaskField int `json:"task_field"`
}
// TestStructToMap_WithEmbeddedStruct verifies that fields of an anonymous
// embedded struct are flattened to the top level alongside the outer fields.
func TestStructToMap_WithEmbeddedStruct(t *testing.T) {
	cfg := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "test_value",
	}
	got := StructToMap(cfg)
	want := map[string]interface{}{
		"enabled":                true,
		"scan_interval_seconds":  1800,
		"max_concurrent":         3,
		"task_specific_field":    0.25,
		"another_specific_field": "test_value",
	}
	if len(got) != len(want) {
		t.Errorf("Expected %d fields, got %d. Result: %+v", len(want), len(got), got)
	}
	for key, wantVal := range want {
		gotVal, ok := got[key]
		if !ok {
			t.Errorf("Missing field: %s", key)
			continue
		}
		if !reflect.DeepEqual(gotVal, wantVal) {
			t.Errorf("Field %s: expected %v (%T), got %v (%T)", key, wantVal, wantVal, gotVal, gotVal)
		}
	}
}
// TestStructToMap_WithNestedStruct verifies that embedded fields are
// flattened while a named nested struct is emitted whole under its own key.
func TestStructToMap_WithNestedStruct(t *testing.T) {
	cfg := &TestNestedConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             false,
			ScanIntervalSeconds: 3600,
			MaxConcurrent:       1,
		},
		NestedStruct: struct {
			NestedField string `json:"nested_field"`
		}{
			NestedField: "nested_value",
		},
		TaskField: 42,
	}
	got := StructToMap(cfg)
	// Embedded struct fields must surface at the top level.
	if v, ok := got["enabled"]; !ok || v != false {
		t.Errorf("Expected enabled=false from embedded struct, got %v", v)
	}
	if v, ok := got["scan_interval_seconds"]; !ok || v != 3600 {
		t.Errorf("Expected scan_interval_seconds=3600 from embedded struct, got %v", v)
	}
	if v, ok := got["max_concurrent"]; !ok || v != 1 {
		t.Errorf("Expected max_concurrent=1 from embedded struct, got %v", v)
	}
	// Regular outer fields must be present too.
	if v, ok := got["task_field"]; !ok || v != 42 {
		t.Errorf("Expected task_field=42, got %v", v)
	}
	// The named nested struct must be kept as a single value, not flattened.
	raw, ok := got["nested_struct"]
	if !ok {
		t.Errorf("Missing nested_struct field")
	} else {
		nested, isStruct := raw.(struct {
			NestedField string `json:"nested_field"`
		})
		if !isStruct || nested.NestedField != "nested_value" {
			t.Errorf("Expected nested_struct with NestedField='nested_value', got %v", raw)
		}
	}
}
// TestMapToStruct_WithEmbeddedStruct verifies that flattened keys populate
// both embedded and outer fields.
func TestMapToStruct_WithEmbeddedStruct(t *testing.T) {
	src := map[string]interface{}{
		"enabled":                true,
		"scan_interval_seconds":  2400,
		"max_concurrent":         5,
		"task_specific_field":    0.15,
		"another_specific_field": "updated_value",
	}
	dst := &TestTaskConfig{}
	if err := MapToStruct(src, dst); err != nil {
		t.Fatalf("MapToStruct failed: %v", err)
	}
	// Embedded struct fields.
	if !dst.Enabled {
		t.Errorf("Expected Enabled=true, got %v", dst.Enabled)
	}
	if dst.ScanIntervalSeconds != 2400 {
		t.Errorf("Expected ScanIntervalSeconds=2400, got %v", dst.ScanIntervalSeconds)
	}
	if dst.MaxConcurrent != 5 {
		t.Errorf("Expected MaxConcurrent=5, got %v", dst.MaxConcurrent)
	}
	// Outer fields.
	if dst.TaskSpecificField != 0.15 {
		t.Errorf("Expected TaskSpecificField=0.15, got %v", dst.TaskSpecificField)
	}
	if dst.AnotherSpecificField != "updated_value" {
		t.Errorf("Expected AnotherSpecificField='updated_value', got %v", dst.AnotherSpecificField)
	}
}
// TestMapToStruct_PartialData verifies that keys absent from the map leave
// pre-existing field values untouched (form-style partial updates).
func TestMapToStruct_PartialData(t *testing.T) {
	src := map[string]interface{}{
		"enabled":             false,
		"max_concurrent":      2,
		"task_specific_field": 0.30,
	}
	dst := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       1,
		},
		TaskSpecificField:    0.20,
		AnotherSpecificField: "initial_value",
	}
	if err := MapToStruct(src, dst); err != nil {
		t.Fatalf("MapToStruct failed: %v", err)
	}
	// Fields present in the map must be overwritten.
	if dst.Enabled != false {
		t.Errorf("Expected Enabled=false (updated), got %v", dst.Enabled)
	}
	if dst.MaxConcurrent != 2 {
		t.Errorf("Expected MaxConcurrent=2 (updated), got %v", dst.MaxConcurrent)
	}
	if dst.TaskSpecificField != 0.30 {
		t.Errorf("Expected TaskSpecificField=0.30 (updated), got %v", dst.TaskSpecificField)
	}
	// Fields absent from the map must keep their initial values.
	if dst.ScanIntervalSeconds != 1800 {
		t.Errorf("Expected ScanIntervalSeconds=1800 (unchanged), got %v", dst.ScanIntervalSeconds)
	}
	if dst.AnotherSpecificField != "initial_value" {
		t.Errorf("Expected AnotherSpecificField='initial_value' (unchanged), got %v", dst.AnotherSpecificField)
	}
}
// TestRoundTripSerialization verifies struct -> map -> struct is lossless.
func TestRoundTripSerialization(t *testing.T) {
	before := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 3600,
			MaxConcurrent:       4,
		},
		TaskSpecificField:    0.18,
		AnotherSpecificField: "round_trip_test",
	}
	asMap := StructToMap(before)
	after := &TestTaskConfig{}
	if err := MapToStruct(asMap, after); err != nil {
		t.Fatalf("Round-trip MapToStruct failed: %v", err)
	}
	if !reflect.DeepEqual(before.TestBaseConfig, after.TestBaseConfig) {
		t.Errorf("BaseConfig mismatch:\nOriginal: %+v\nRound-trip: %+v", before.TestBaseConfig, after.TestBaseConfig)
	}
	if before.TaskSpecificField != after.TaskSpecificField {
		t.Errorf("TaskSpecificField mismatch: %v != %v", before.TaskSpecificField, after.TaskSpecificField)
	}
	if before.AnotherSpecificField != after.AnotherSpecificField {
		t.Errorf("AnotherSpecificField mismatch: %v != %v", before.AnotherSpecificField, after.AnotherSpecificField)
	}
}
func TestStructToMap_EmptyStruct(t *testing.T) {
config := &TestTaskConfig{}
result := StructToMap(config)
// Should still include all fields, even with zero values
expectedFields := []string{"enabled", "scan_interval_seconds", "max_concurrent", "task_specific_field", "another_specific_field"}
for _, field := range expectedFields {
if _, exists := result[field]; !exists {
t.Errorf("Missing field: %s", field)
}
}
}
// TestStructToMap_NilPointer verifies a typed nil pointer yields an empty map.
func TestStructToMap_NilPointer(t *testing.T) {
	var nilCfg *TestTaskConfig
	got := StructToMap(nilCfg)
	if len(got) != 0 {
		t.Errorf("Expected empty map for nil pointer, got %+v", got)
	}
}
func TestMapToStruct_InvalidInput(t *testing.T) {
data := map[string]interface{}{
"enabled": "not_a_bool", // Wrong type
}
config := &TestTaskConfig{}
err := MapToStruct(data, config)
if err == nil {
t.Errorf("Expected error for invalid input type, but got none")
}
}
// TestMapToStruct_NonPointer verifies a non-pointer destination is rejected.
func TestMapToStruct_NonPointer(t *testing.T) {
	src := map[string]interface{}{
		"enabled": true,
	}
	var dst TestTaskConfig // Not a pointer
	if err := MapToStruct(src, dst); err == nil {
		t.Errorf("Expected error for non-pointer input, but got none")
	}
}
// Benchmark tests to ensure performance is reasonable.

// BenchmarkStructToMap measures reflection-based struct flattening.
func BenchmarkStructToMap(b *testing.B) {
	cfg := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "benchmark_test",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = StructToMap(cfg)
	}
}
// BenchmarkMapToStruct measures reflection-based struct population.
func BenchmarkMapToStruct(b *testing.B) {
	src := map[string]interface{}{
		"enabled":                true,
		"scan_interval_seconds":  1800,
		"max_concurrent":         3,
		"task_specific_field":    0.25,
		"another_specific_field": "benchmark_test",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		dst := &TestTaskConfig{}
		_ = MapToStruct(src, dst)
	}
}
// BenchmarkRoundTrip measures a full struct -> map -> struct cycle.
func BenchmarkRoundTrip(b *testing.B) {
	src := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "benchmark_test",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		asMap := StructToMap(src)
		dst := &TestTaskConfig{}
		_ = MapToStruct(asMap, dst)
	}
}

View File

@@ -1,229 +0,0 @@
package erasure_coding
import (
"sync"
"time"
)
// ErasureCodingMetrics contains erasure coding-specific monitoring data.
// All fields are guarded by mutex; read and write them only through the
// accessor methods, never directly from other goroutines.
type ErasureCodingMetrics struct {
	// Execution metrics
	VolumesEncoded      int64     `json:"volumes_encoded"`
	TotalShardsCreated  int64     `json:"total_shards_created"`
	TotalDataProcessed  int64     `json:"total_data_processed"` // bytes across all encoded volumes
	TotalSourcesRemoved int64     `json:"total_sources_removed"`
	LastEncodingTime    time.Time `json:"last_encoding_time"`

	// Performance metrics (the averages are exponential moving averages)
	AverageEncodingTime  int64   `json:"average_encoding_time_seconds"`
	AverageShardSize     int64   `json:"average_shard_size"`
	AverageDataShards    int     `json:"average_data_shards"`
	AverageParityShards  int     `json:"average_parity_shards"`
	SuccessfulOperations int64   `json:"successful_operations"`
	FailedOperations     int64   `json:"failed_operations"`

	// Distribution metrics; rack keys are "<datacenter>:<rack>"
	ShardsPerDataCenter  map[string]int64 `json:"shards_per_datacenter"`
	ShardsPerRack        map[string]int64 `json:"shards_per_rack"`
	PlacementSuccessRate float64          `json:"placement_success_rate"`

	// Current task metrics
	CurrentVolumeSize      int64 `json:"current_volume_size"`
	CurrentShardCount      int   `json:"current_shard_count"`
	VolumesPendingEncoding int   `json:"volumes_pending_encoding"`

	mutex sync.RWMutex
}
// NewErasureCodingMetrics returns a metrics instance with initialized
// distribution maps and LastEncodingTime set to now.
func NewErasureCodingMetrics() *ErasureCodingMetrics {
	m := &ErasureCodingMetrics{
		ShardsPerDataCenter: make(map[string]int64),
		ShardsPerRack:       make(map[string]int64),
	}
	m.LastEncodingTime = time.Now()
	return m
}
// RecordVolumeEncoded records one successful encoding: it bumps counters,
// accumulates processed bytes and shard counts, and folds timing, shard
// size, and shard-layout observations into moving averages (4/5 weight on
// history, 1/5 on the new sample).
func (m *ErasureCodingMetrics) RecordVolumeEncoded(volumeSize int64, shardsCreated int, dataShards int, parityShards int, encodingTime time.Duration, sourceRemoved bool) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.VolumesEncoded++
	m.SuccessfulOperations++
	m.TotalShardsCreated += int64(shardsCreated)
	m.TotalDataProcessed += volumeSize
	m.LastEncodingTime = time.Now()
	if sourceRemoved {
		m.TotalSourcesRemoved++
	}
	// Moving-average encoding time in whole seconds.
	secs := int64(encodingTime.Seconds())
	if m.AverageEncodingTime == 0 {
		m.AverageEncodingTime = secs
	} else {
		m.AverageEncodingTime = (m.AverageEncodingTime*4 + secs) / 5
	}
	// Moving-average shard size, derived from this volume's size per shard.
	if shardsCreated > 0 {
		perShard := volumeSize / int64(shardsCreated)
		if m.AverageShardSize == 0 {
			m.AverageShardSize = perShard
		} else {
			m.AverageShardSize = (m.AverageShardSize*4 + perShard) / 5
		}
	}
	// Moving-average data/parity shard counts; the first sample seeds both.
	if m.AverageDataShards == 0 {
		m.AverageDataShards = dataShards
		m.AverageParityShards = parityShards
	} else {
		m.AverageDataShards = (m.AverageDataShards*4 + dataShards) / 5
		m.AverageParityShards = (m.AverageParityShards*4 + parityShards) / 5
	}
}
// RecordFailure increments the failed-operation counter.
func (m *ErasureCodingMetrics) RecordFailure() {
	m.mutex.Lock()
	m.FailedOperations++
	m.mutex.Unlock()
}
// RecordShardPlacement tallies one shard placement per datacenter and per
// rack; rack counters are keyed "<datacenter>:<rack>" to stay unique
// across datacenters.
func (m *ErasureCodingMetrics) RecordShardPlacement(dataCenter string, rack string) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.ShardsPerDataCenter[dataCenter]++
	m.ShardsPerRack[dataCenter+":"+rack]++
}
// UpdateCurrentVolumeInfo records the size and shard count of the volume
// currently being processed.
func (m *ErasureCodingMetrics) UpdateCurrentVolumeInfo(volumeSize int64, shardCount int) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.CurrentVolumeSize = volumeSize
	m.CurrentShardCount = shardCount
}
// SetVolumesPendingEncoding records how many volumes are queued for encoding.
func (m *ErasureCodingMetrics) SetVolumesPendingEncoding(count int) {
	m.mutex.Lock()
	m.VolumesPendingEncoding = count
	m.mutex.Unlock()
}
// UpdatePlacementSuccessRate folds a new placement success observation into
// the moving average (80% history, 20% new sample); the first sample seeds
// the average directly.
func (m *ErasureCodingMetrics) UpdatePlacementSuccessRate(rate float64) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if m.PlacementSuccessRate == 0 {
		m.PlacementSuccessRate = rate
		return
	}
	m.PlacementSuccessRate = 0.8*m.PlacementSuccessRate + 0.2*rate
}
// GetMetrics returns a value copy of the current metrics. The maps are
// deep-copied and the copy is built field by field so the embedded mutex
// is never duplicated (copying a locked mutex is a vet error).
func (m *ErasureCodingMetrics) GetMetrics() ErasureCodingMetrics {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	dcCopy := make(map[string]int64, len(m.ShardsPerDataCenter))
	for k, v := range m.ShardsPerDataCenter {
		dcCopy[k] = v
	}
	rackCopy := make(map[string]int64, len(m.ShardsPerRack))
	for k, v := range m.ShardsPerRack {
		rackCopy[k] = v
	}
	return ErasureCodingMetrics{
		VolumesEncoded:         m.VolumesEncoded,
		TotalShardsCreated:     m.TotalShardsCreated,
		TotalDataProcessed:     m.TotalDataProcessed,
		TotalSourcesRemoved:    m.TotalSourcesRemoved,
		LastEncodingTime:       m.LastEncodingTime,
		AverageEncodingTime:    m.AverageEncodingTime,
		AverageShardSize:       m.AverageShardSize,
		AverageDataShards:      m.AverageDataShards,
		AverageParityShards:    m.AverageParityShards,
		SuccessfulOperations:   m.SuccessfulOperations,
		FailedOperations:       m.FailedOperations,
		ShardsPerDataCenter:    dcCopy,
		ShardsPerRack:          rackCopy,
		PlacementSuccessRate:   m.PlacementSuccessRate,
		CurrentVolumeSize:      m.CurrentVolumeSize,
		CurrentShardCount:      m.CurrentShardCount,
		VolumesPendingEncoding: m.VolumesPendingEncoding,
	}
}
// GetSuccessRate returns the success rate as a percentage. With no recorded
// operations it optimistically reports 100.
func (m *ErasureCodingMetrics) GetSuccessRate() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if total := m.SuccessfulOperations + m.FailedOperations; total != 0 {
		return float64(m.SuccessfulOperations) / float64(total) * 100.0
	}
	return 100.0
}

// GetAverageDataProcessed returns the average data processed per volume
// (0 before any volume has been encoded, to avoid division by zero).
func (m *ErasureCodingMetrics) GetAverageDataProcessed() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if m.VolumesEncoded == 0 {
		return 0
	}
	return float64(m.TotalDataProcessed) / float64(m.VolumesEncoded)
}

// GetSourceRemovalRate returns the percentage of sources removed after
// encoding (0 before any volume has been encoded).
func (m *ErasureCodingMetrics) GetSourceRemovalRate() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if m.VolumesEncoded == 0 {
		return 0
	}
	return float64(m.TotalSourcesRemoved) / float64(m.VolumesEncoded) * 100.0
}
// Reset resets all metrics to zero.
//
// BUG FIX: the previous implementation did `*m = ErasureCodingMetrics{...}`
// while holding m.mutex. That overwrote the locked RWMutex with an unlocked
// zero value, so the deferred Unlock panicked with
// "sync: Unlock of unlocked RWMutex" (and `go vet` flags the assignment as a
// lock copy). Clear each field in place instead and leave the mutex alone.
func (m *ErasureCodingMetrics) Reset() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.VolumesEncoded = 0
	m.TotalShardsCreated = 0
	m.TotalDataProcessed = 0
	m.TotalSourcesRemoved = 0
	m.LastEncodingTime = time.Now()
	m.AverageEncodingTime = 0
	m.AverageShardSize = 0
	m.AverageDataShards = 0
	m.AverageParityShards = 0
	m.SuccessfulOperations = 0
	m.FailedOperations = 0
	m.ShardsPerDataCenter = make(map[string]int64)
	m.ShardsPerRack = make(map[string]int64)
	m.PlacementSuccessRate = 0
	m.CurrentVolumeSize = 0
	m.CurrentShardCount = 0
	m.VolumesPendingEncoding = 0
}
// Global metrics instance for erasure coding tasks
var globalErasureCodingMetrics = NewErasureCodingMetrics()

// GetGlobalErasureCodingMetrics returns the global erasure coding metrics instance
// shared by every erasure-coding task in this process.
func GetGlobalErasureCodingMetrics() *ErasureCodingMetrics {
	return globalErasureCodingMetrics
}

View File

@@ -64,51 +64,6 @@ func AutoRegisterUI(registerFunc func(*types.UIRegistry)) {
glog.V(1).Infof("Auto-registered task UI provider")
}
// SetDefaultCapabilitiesFromRegistry sets the default worker capabilities
// based on all registered task types
func SetDefaultCapabilitiesFromRegistry() {
	var capabilities []types.TaskType
	for taskType := range GetGlobalTypesRegistry().GetAllDetectors() {
		capabilities = append(capabilities, taskType)
	}
	// Publish as the process-wide default for newly configured workers.
	types.SetDefaultCapabilities(capabilities)
	glog.V(1).Infof("Set default worker capabilities from registry: %v", capabilities)
}

// BuildMaintenancePolicyFromTasks creates a maintenance policy with default configurations
// from all registered tasks using their UI providers
func BuildMaintenancePolicyFromTasks() *types.MaintenancePolicy {
	policy := types.NewMaintenancePolicy()
	// Seed the policy with each UI provider's current default configuration.
	for taskType, provider := range GetGlobalUIRegistry().GetAllProviders() {
		cfg := provider.GetCurrentConfig()
		policy.SetTaskConfig(taskType, cfg)
		glog.V(3).Infof("Added default config for task type %s to policy", taskType)
	}
	glog.V(2).Infof("Built maintenance policy with %d task configurations", len(policy.TaskConfigs))
	return policy
}

// SetMaintenancePolicyFromTasks sets the default maintenance policy from registered tasks
func SetMaintenancePolicyFromTasks() {
	// This function can be called to initialize the policy from registered tasks
	// For now, we'll just log that this should be called by the integration layer
	glog.V(1).Infof("SetMaintenancePolicyFromTasks called - policy should be built by the integration layer")
}
// TaskRegistry manages task factories
type TaskRegistry struct {
factories map[types.TaskType]types.TaskFactory

View File

@@ -36,16 +36,3 @@ func RegisterTaskConfigSchema(taskType string, provider TaskConfigSchemaProvider
defer globalSchemaRegistry.mutex.Unlock()
globalSchemaRegistry.providers[taskType] = provider
}
// GetTaskConfigSchema returns the schema for the specified task type,
// or nil when no provider has been registered for it.
func GetTaskConfigSchema(taskType string) *TaskConfigSchema {
	// Release the registry lock before invoking the provider so a slow
	// GetConfigSchema implementation cannot block registrations.
	globalSchemaRegistry.mutex.RLock()
	provider, ok := globalSchemaRegistry.providers[taskType]
	globalSchemaRegistry.mutex.RUnlock()
	if !ok {
		return nil
	}
	return provider.GetConfigSchema()
}

View File

@@ -1,12 +1,9 @@
package tasks
import (
"context"
"fmt"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
@@ -26,353 +23,11 @@ type BaseTask struct {
currentStage string // Current stage description
}
// NewBaseTask creates a new base task with the default logger configuration.
func NewBaseTask(taskType types.TaskType) *BaseTask {
	// Delegate to NewBaseTaskWithLogger so the two constructors cannot
	// drift apart (previously the struct literal was duplicated).
	return NewBaseTaskWithLogger(taskType, DefaultTaskLoggerConfig())
}

// NewBaseTaskWithLogger creates a new base task with custom logger configuration.
func NewBaseTaskWithLogger(taskType types.TaskType, loggerConfig TaskLoggerConfig) *BaseTask {
	return &BaseTask{
		taskType:     taskType,
		progress:     0.0,
		cancelled:    false,
		loggerConfig: loggerConfig,
	}
}
// InitializeLogger initializes the task logger with task details.
// It is a thin alias for InitializeTaskLogger.
func (t *BaseTask) InitializeLogger(taskID string, workerID string, params types.TaskParams) error {
	return t.InitializeTaskLogger(taskID, workerID, params)
}

// InitializeTaskLogger initializes the task logger with task details (LoggerProvider interface).
// It records the task ID on the receiver and creates a new logger using the
// configured loggerConfig; both writes happen under t.mutex.
func (t *BaseTask) InitializeTaskLogger(taskID string, workerID string, params types.TaskParams) error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.taskID = taskID
	logger, err := NewTaskLogger(taskID, t.taskType, workerID, params, t.loggerConfig)
	if err != nil {
		return fmt.Errorf("failed to initialize task logger: %w", err)
	}
	t.logger = logger
	t.logger.Info("BaseTask initialized for task %s (type: %s)", taskID, t.taskType)
	return nil
}

// Type returns the task type. taskType is set once at construction and never
// mutated, so no lock is needed here.
func (t *BaseTask) Type() types.TaskType {
	return t.taskType
}

// GetProgress returns the current progress (0.0 to 100.0).
func (t *BaseTask) GetProgress() float64 {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.progress
}
// SetProgress sets the current progress (clamped to [0, 100]) and logs it.
// The logger call and the progress callback both fire only when the value
// actually changed, and both run outside the lock.
func (t *BaseTask) SetProgress(progress float64) {
	t.mutex.Lock()
	if progress < 0 {
		progress = 0
	}
	if progress > 100 {
		progress = 100
	}
	oldProgress := t.progress
	callback := t.progressCallback
	stage := t.currentStage
	// FIX: snapshot the logger while the lock is held. t.logger is assigned
	// under t.mutex in InitializeTaskLogger; the original read of t.logger
	// after Unlock was a data race with concurrent initialization.
	logger := t.logger
	t.progress = progress
	t.mutex.Unlock()
	// Log progress change
	if logger != nil && progress != oldProgress {
		message := stage
		if message == "" {
			message = fmt.Sprintf("Progress updated from %.1f%% to %.1f%%", oldProgress, progress)
		}
		logger.LogProgress(progress, message)
	}
	// Call progress callback if set
	if callback != nil && progress != oldProgress {
		callback(progress, stage)
	}
}
// SetProgressWithStage sets the current progress (clamped to [0, 100]) with a
// stage description. Unlike SetProgress, it logs and invokes the callback
// unconditionally, even when the value did not change.
func (t *BaseTask) SetProgressWithStage(progress float64, stage string) {
	t.mutex.Lock()
	if progress < 0 {
		progress = 0
	}
	if progress > 100 {
		progress = 100
	}
	callback := t.progressCallback
	// FIX: snapshot the logger while the lock is held; t.logger is assigned
	// under t.mutex in InitializeTaskLogger and the original unsynchronized
	// read after Unlock was a data race.
	logger := t.logger
	t.progress = progress
	t.currentStage = stage
	t.mutex.Unlock()
	// Log progress change
	if logger != nil {
		logger.LogProgress(progress, stage)
	}
	// Call progress callback if set
	if callback != nil {
		callback(progress, stage)
	}
}
// SetCurrentStage sets the current stage description.
func (t *BaseTask) SetCurrentStage(stage string) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.currentStage = stage
}

// GetCurrentStage returns the current stage description.
func (t *BaseTask) GetCurrentStage() string {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.currentStage
}

// Cancel cancels the task. Cancellation is cooperative: it only sets the flag
// reported by IsCancelled; the running executor is expected to poll that flag
// (see ExecuteTask). Calling Cancel on an already-cancelled task is a no-op.
func (t *BaseTask) Cancel() error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	if t.cancelled {
		return nil
	}
	t.cancelled = true
	// Logging happens under the lock, which serializes it with
	// InitializeTaskLogger's assignment of t.logger.
	if t.logger != nil {
		t.logger.LogStatus("cancelled", "Task cancelled by request")
		t.logger.Warning("Task %s was cancelled", t.taskID)
	}
	return nil
}

// IsCancelled returns whether the task is cancelled.
func (t *BaseTask) IsCancelled() bool {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.cancelled
}
// SetStartTime sets the task start time and, when a logger is attached,
// records a "running" status transition.
func (t *BaseTask) SetStartTime(startTime time.Time) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.startTime = startTime
	if t.logger != nil {
		t.logger.LogStatus("running", fmt.Sprintf("Task started at %s", startTime.Format(time.RFC3339)))
	}
}

// GetStartTime returns the task start time.
func (t *BaseTask) GetStartTime() time.Time {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.startTime
}

// SetEstimatedDuration sets the estimated duration and logs it as a
// structured entry (both string form and raw seconds).
func (t *BaseTask) SetEstimatedDuration(duration time.Duration) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.estimatedDuration = duration
	if t.logger != nil {
		t.logger.LogWithFields("INFO", "Estimated duration set", map[string]interface{}{
			"estimated_duration": duration.String(),
			"estimated_seconds":  duration.Seconds(),
		})
	}
}

// GetEstimatedDuration returns the estimated duration.
func (t *BaseTask) GetEstimatedDuration() time.Duration {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.estimatedDuration
}
// SetProgressCallback sets the progress callback function
func (t *BaseTask) SetProgressCallback(callback func(float64, string)) {
	t.mutex.Lock()
	t.progressCallback = callback
	t.mutex.Unlock()
}

// SetLoggerConfig sets the logger configuration for this task; it takes
// effect the next time the logger is initialized.
func (t *BaseTask) SetLoggerConfig(config TaskLoggerConfig) {
	t.mutex.Lock()
	t.loggerConfig = config
	t.mutex.Unlock()
}

// GetLogger returns the task logger
func (t *BaseTask) GetLogger() TaskLogger {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.logger
}

// GetTaskLogger returns the task logger (LoggerProvider interface).
// It is the same accessor as GetLogger under a second interface name.
func (t *BaseTask) GetTaskLogger() TaskLogger {
	return t.GetLogger()
}
// LogInfo logs an info message via the task logger, if one is attached.
//
// FIX: the logger reference is now read through the mutex-guarded getter.
// t.logger is assigned under t.mutex in InitializeTaskLogger, so the original
// direct field reads here were data races with concurrent initialization.
// Callers must not hold t.mutex when calling these helpers (no call site in
// this file does).
func (t *BaseTask) LogInfo(message string, args ...interface{}) {
	if logger := t.GetTaskLogger(); logger != nil {
		logger.Info(message, args...)
	}
}

// LogWarning logs a warning message via the task logger, if one is attached.
func (t *BaseTask) LogWarning(message string, args ...interface{}) {
	if logger := t.GetTaskLogger(); logger != nil {
		logger.Warning(message, args...)
	}
}

// LogError logs an error message via the task logger, if one is attached.
func (t *BaseTask) LogError(message string, args ...interface{}) {
	if logger := t.GetTaskLogger(); logger != nil {
		logger.Error(message, args...)
	}
}

// LogDebug logs a debug message via the task logger, if one is attached.
func (t *BaseTask) LogDebug(message string, args ...interface{}) {
	if logger := t.GetTaskLogger(); logger != nil {
		logger.Debug(message, args...)
	}
}

// LogWithFields logs a message with structured fields via the task logger,
// if one is attached.
func (t *BaseTask) LogWithFields(level string, message string, fields map[string]interface{}) {
	if logger := t.GetTaskLogger(); logger != nil {
		logger.LogWithFields(level, message, fields)
	}
}
// FinishTask finalizes the task and closes the logger. It records a final
// "completed" or "failed" status (with errorMsg on failure) and then closes
// the logger; a close failure is logged but not returned. Always returns nil.
// NOTE(review): t.logger is read here without holding t.mutex — racy if
// called concurrently with InitializeTaskLogger; confirm callers serialize.
func (t *BaseTask) FinishTask(success bool, errorMsg string) error {
	if t.logger != nil {
		if success {
			t.logger.LogStatus("completed", "Task completed successfully")
			t.logger.Info("Task %s finished successfully", t.taskID)
		} else {
			t.logger.LogStatus("failed", fmt.Sprintf("Task failed: %s", errorMsg))
			t.logger.Error("Task %s failed: %s", t.taskID, errorMsg)
		}
		// Close logger
		if err := t.logger.Close(); err != nil {
			glog.Errorf("Failed to close task logger: %v", err)
		}
	}
	return nil
}
// ExecuteTask is a wrapper that handles common task execution logic with logging:
// it lazily initializes the logger, stamps the start time, runs the executor
// under a cancellable context, and finalizes the task (FinishTask) on every
// exit path. Returns the executor's error, context.Canceled if the task was
// cancelled during execution, or nil on success.
func (t *BaseTask) ExecuteTask(ctx context.Context, params types.TaskParams, executor func(context.Context, types.TaskParams) error) error {
	// Initialize logger if not already done
	if t.logger == nil {
		// Generate a temporary task ID if none provided
		if t.taskID == "" {
			t.taskID = fmt.Sprintf("task_%d", time.Now().UnixNano())
		}
		workerID := "unknown"
		// Logger initialization failure is non-fatal: the task still runs,
		// the Log* helpers simply become no-ops.
		if err := t.InitializeLogger(t.taskID, workerID, params); err != nil {
			glog.Warningf("Failed to initialize task logger: %v", err)
		}
	}
	t.SetStartTime(time.Now())
	t.SetProgress(0)
	if t.logger != nil {
		t.logger.LogWithFields("INFO", "Task execution started", map[string]interface{}{
			"volume_id":  params.VolumeID,
			"server":     getServerFromSources(params.TypedParams.Sources),
			"collection": params.Collection,
		})
	}
	// Create a context that can be cancelled
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Monitor for cancellation: poll IsCancelled once per second and cancel
	// the derived context when Cancel() has been called. The goroutine exits
	// when either the context is done or cancellation is observed, so it
	// cannot outlive this function (the deferred cancel fires on return).
	go func() {
		for !t.IsCancelled() {
			select {
			case <-ctx.Done():
				return
			case <-time.After(time.Second):
				// Check cancellation every second
			}
		}
		t.LogWarning("Task cancellation detected, cancelling context")
		cancel()
	}()
	// Execute the actual task
	t.LogInfo("Starting task executor")
	err := executor(ctx, params)
	if err != nil {
		t.LogError("Task executor failed: %v", err)
		t.FinishTask(false, err.Error())
		return err
	}
	// A cancellation that raced with a successful executor return is still
	// reported as a cancelled task.
	if t.IsCancelled() {
		t.LogWarning("Task was cancelled during execution")
		t.FinishTask(false, "cancelled")
		return context.Canceled
	}
	t.SetProgress(100)
	t.LogInfo("Task executor completed successfully")
	t.FinishTask(true, "")
	return nil
}
// UnsupportedTaskTypeError represents an error for unsupported task types
type UnsupportedTaskTypeError struct {
	TaskType types.TaskType
}

// Error implements the error interface.
func (e *UnsupportedTaskTypeError) Error() string {
	return fmt.Sprintf("unsupported task type: %s", e.TaskType)
}
// BaseTaskFactory provides common functionality for task factories
type BaseTaskFactory struct {
taskType types.TaskType
@@ -399,37 +54,12 @@ func (f *BaseTaskFactory) Description() string {
return f.description
}
// ValidateParams validates task parameters, checking each name listed in
// requiredFields ("volume_id", "server", "collection"); unknown field names
// are ignored. Returns a *ValidationError for the first missing field.
func ValidateParams(params types.TaskParams, requiredFields ...string) error {
	for _, field := range requiredFields {
		var missing bool
		var msg string
		switch field {
		case "volume_id":
			missing, msg = params.VolumeID == 0, "volume_id is required"
		case "server":
			missing, msg = len(params.TypedParams.Sources) == 0, "server is required"
		case "collection":
			missing, msg = params.Collection == "", "collection is required"
		}
		if missing {
			return &ValidationError{Field: field, Message: msg}
		}
	}
	return nil
}
// ValidationError represents a parameter validation error
type ValidationError struct {
	Field   string
	Message string
}

// Error implements the error interface, rendering as "field: message".
func (e *ValidationError) Error() string {
	return fmt.Sprintf("%s: %s", e.Field, e.Message)
}
// getServerFromSources extracts the server address from unified sources
func getServerFromSources(sources []*worker_pb.TaskSource) string {
if len(sources) > 0 {

View File

@@ -223,36 +223,3 @@ func (h *TaskLogHandler) readTaskLogEntries(logDir string, request *worker_pb.Ta
return pbEntries, nil
}
// ListAvailableTaskLogs returns a list of available task log directories,
// i.e. the names of all subdirectories of the handler's base log directory.
func (h *TaskLogHandler) ListAvailableTaskLogs() ([]string, error) {
	entries, err := os.ReadDir(h.baseLogDir)
	if err != nil {
		return nil, fmt.Errorf("failed to read base log directory: %w", err)
	}
	var dirs []string
	for _, entry := range entries {
		// Plain files in the base directory are not task logs; skip them.
		if entry.IsDir() {
			dirs = append(dirs, entry.Name())
		}
	}
	return dirs, nil
}
// CleanupOldLogs removes old task logs beyond the specified limit.
// It builds a throwaway FileTaskLogger configured with the handler's base
// directory and maxTasks, purely to reuse that type's cleanupOldLogs logic.
// Always returns nil; any per-file removal errors are handled inside
// cleanupOldLogs (behavior defined by FileTaskLogger, outside this view).
func (h *TaskLogHandler) CleanupOldLogs(maxTasks int) error {
	config := TaskLoggerConfig{
		BaseLogDir: h.baseLogDir,
		MaxTasks:   maxTasks,
	}
	// Create a temporary logger to trigger cleanup
	tempLogger := &FileTaskLogger{
		config: config,
	}
	tempLogger.cleanupOldLogs()
	return nil
}

View File

@@ -1,9 +1,6 @@
package tasks
import (
"reflect"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
@@ -85,100 +82,5 @@ type CommonConfigGetter[T any] struct {
schedulerFunc func() T
}
// NewCommonConfigGetter creates a new common config getter
func NewCommonConfigGetter[T any](
	defaultConfig T,
	detectorFunc func() T,
	schedulerFunc func() T,
) *CommonConfigGetter[T] {
	getter := &CommonConfigGetter[T]{}
	getter.defaultConfig = defaultConfig
	getter.detectorFunc = detectorFunc
	getter.schedulerFunc = schedulerFunc
	return getter
}

// GetConfig returns the merged configuration: the defaults, overlaid first
// with the detector's non-zero values and then with the scheduler's.
func (cg *CommonConfigGetter[T]) GetConfig() T {
	merged := cg.defaultConfig
	// Overlay order matters: scheduler values win over detector values.
	for _, source := range []func() T{cg.detectorFunc, cg.schedulerFunc} {
		if source != nil {
			mergeConfigs(&merged, source())
		}
	}
	return merged
}
// mergeConfigs merges non-zero values from source into dest. Both must be
// structs (otherwise the call is a no-op); unexported fields are left alone.
func mergeConfigs[T any](dest *T, source T) {
	dv := reflect.ValueOf(dest).Elem()
	sv := reflect.ValueOf(source)
	if dv.Kind() != reflect.Struct || sv.Kind() != reflect.Struct {
		return
	}
	for i := 0; i < dv.NumField(); i++ {
		df, sf := dv.Field(i), sv.Field(i)
		// Copy only settable (exported), non-zero, type-matching fields.
		if df.CanSet() && !sf.IsZero() && df.Type() == sf.Type() {
			df.Set(sf)
		}
	}
}
// RegisterUIFunc provides a common registration function signature.
type RegisterUIFunc[D, S any] func(uiRegistry *types.UIRegistry, detector D, scheduler S)

// CommonRegisterUI provides a common registration implementation: it builds a
// BaseUIProvider from the supplied schema/config/apply callbacks and registers
// it with the given UI registry. The detector and scheduler parameters are
// accepted for signature compatibility with RegisterUIFunc but are not used
// here beyond instantiating the generic types.
func CommonRegisterUI[D, S any](
	taskType types.TaskType,
	displayName string,
	uiRegistry *types.UIRegistry,
	detector D,
	scheduler S,
	schemaFunc func() *TaskConfigSchema,
	configFunc func() types.TaskConfig,
	applyTaskPolicyFunc func(policy *worker_pb.TaskPolicy) error,
	applyTaskConfigFunc func(config types.TaskConfig) error,
) {
	// Get metadata from schema; fall back to generic text/icon when the
	// task type has no registered schema.
	schema := schemaFunc()
	description := "Task configuration"
	icon := "fas fa-cog"
	if schema != nil {
		description = schema.Description
		icon = schema.Icon
	}
	uiProvider := NewBaseUIProvider(
		taskType,
		displayName,
		description,
		icon,
		schemaFunc,
		configFunc,
		applyTaskPolicyFunc,
		applyTaskConfigFunc,
	)
	uiRegistry.RegisterUI(uiProvider)
	glog.V(1).Infof("Registered %s task UI provider", taskType)
}

View File

@@ -1,20 +0,0 @@
package util
import "strings"
// ParseCSVSet splits a comma-separated string into a set of trimmed,
// non-empty values. Returns nil if the input is empty or all whitespace.
func ParseCSVSet(csv string) map[string]bool {
	if strings.TrimSpace(csv) == "" {
		return nil
	}
	result := map[string]bool{}
	for _, raw := range strings.Split(csv, ",") {
		// Blank items (e.g. from "a,,b" or trailing commas) are dropped.
		if value := strings.TrimSpace(raw); value != "" {
			result[value] = true
		}
	}
	return result
}

View File

@@ -1,151 +0,0 @@
package vacuum
import (
"sync"
"time"
)
// VacuumMetrics contains vacuum-specific monitoring data.
// All fields are guarded by mutex; readers should use GetMetrics to obtain
// a consistent snapshot rather than reading fields directly.
type VacuumMetrics struct {
	// Execution metrics
	VolumesVacuumed       int64     `json:"volumes_vacuumed"`
	TotalSpaceReclaimed   int64     `json:"total_space_reclaimed"`
	TotalFilesProcessed   int64     `json:"total_files_processed"`
	TotalGarbageCollected int64     `json:"total_garbage_collected"`
	LastVacuumTime        time.Time `json:"last_vacuum_time"`
	// Performance metrics
	AverageVacuumTime    int64   `json:"average_vacuum_time_seconds"`
	AverageGarbageRatio  float64 `json:"average_garbage_ratio"`
	SuccessfulOperations int64   `json:"successful_operations"`
	FailedOperations     int64   `json:"failed_operations"`
	// Current task metrics
	CurrentGarbageRatio  float64 `json:"current_garbage_ratio"`
	VolumesPendingVacuum int     `json:"volumes_pending_vacuum"`
	mutex                sync.RWMutex
}

// NewVacuumMetrics creates a new vacuum metrics instance with
// LastVacuumTime initialized to the current time.
func NewVacuumMetrics() *VacuumMetrics {
	return &VacuumMetrics{
		LastVacuumTime: time.Now(),
	}
}
// RecordVolumeVacuumed records a successful volume vacuum operation: it adds
// the reclaimed space, file, and garbage totals, bumps the success counter,
// stamps LastVacuumTime, and folds vacuumTime / garbageRatio into their
// exponential moving averages (weight 4/5 history, 1/5 new sample for time;
// 0.8/0.2 for ratio). The very first sample seeds each average directly.
func (m *VacuumMetrics) RecordVolumeVacuumed(spaceReclaimed int64, filesProcessed int64, garbageCollected int64, vacuumTime time.Duration, garbageRatio float64) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.VolumesVacuumed++
	m.TotalSpaceReclaimed += spaceReclaimed
	m.TotalFilesProcessed += filesProcessed
	m.TotalGarbageCollected += garbageCollected
	m.SuccessfulOperations++
	m.LastVacuumTime = time.Now()
	// Update average vacuum time
	if m.AverageVacuumTime == 0 {
		m.AverageVacuumTime = int64(vacuumTime.Seconds())
	} else {
		// Exponential moving average
		newTime := int64(vacuumTime.Seconds())
		m.AverageVacuumTime = (m.AverageVacuumTime*4 + newTime) / 5
	}
	// Update average garbage ratio
	if m.AverageGarbageRatio == 0 {
		m.AverageGarbageRatio = garbageRatio
	} else {
		// Exponential moving average
		m.AverageGarbageRatio = 0.8*m.AverageGarbageRatio + 0.2*garbageRatio
	}
}
// RecordFailure records a failed vacuum operation
func (m *VacuumMetrics) RecordFailure() {
	m.mutex.Lock()
	m.FailedOperations++
	m.mutex.Unlock()
}

// UpdateCurrentGarbageRatio updates the current volume's garbage ratio
func (m *VacuumMetrics) UpdateCurrentGarbageRatio(ratio float64) {
	m.mutex.Lock()
	m.CurrentGarbageRatio = ratio
	m.mutex.Unlock()
}

// SetVolumesPendingVacuum sets the number of volumes pending vacuum
func (m *VacuumMetrics) SetVolumesPendingVacuum(count int) {
	m.mutex.Lock()
	m.VolumesPendingVacuum = count
	m.mutex.Unlock()
}
// GetMetrics returns a copy of the current metrics (without the mutex).
// A field-by-field composite literal is used so the embedded RWMutex is not
// copied; the snapshot is safe for the caller to read without locking.
func (m *VacuumMetrics) GetMetrics() VacuumMetrics {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	// Create a copy without the mutex to avoid copying lock value
	return VacuumMetrics{
		VolumesVacuumed:       m.VolumesVacuumed,
		TotalSpaceReclaimed:   m.TotalSpaceReclaimed,
		TotalFilesProcessed:   m.TotalFilesProcessed,
		TotalGarbageCollected: m.TotalGarbageCollected,
		LastVacuumTime:        m.LastVacuumTime,
		AverageVacuumTime:     m.AverageVacuumTime,
		AverageGarbageRatio:   m.AverageGarbageRatio,
		SuccessfulOperations:  m.SuccessfulOperations,
		FailedOperations:      m.FailedOperations,
		CurrentGarbageRatio:   m.CurrentGarbageRatio,
		VolumesPendingVacuum:  m.VolumesPendingVacuum,
	}
}
// GetSuccessRate returns the success rate as a percentage. With no recorded
// operations it optimistically reports 100.
func (m *VacuumMetrics) GetSuccessRate() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if total := m.SuccessfulOperations + m.FailedOperations; total != 0 {
		return float64(m.SuccessfulOperations) / float64(total) * 100.0
	}
	return 100.0
}

// GetAverageSpaceReclaimed returns the average space reclaimed per volume
// (0 before any volume has been vacuumed, to avoid division by zero).
func (m *VacuumMetrics) GetAverageSpaceReclaimed() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if m.VolumesVacuumed == 0 {
		return 0
	}
	return float64(m.TotalSpaceReclaimed) / float64(m.VolumesVacuumed)
}
// Reset resets all metrics to zero.
//
// BUG FIX: the previous implementation did `*m = VacuumMetrics{...}` while
// holding m.mutex. That overwrote the locked RWMutex with an unlocked zero
// value, so the deferred Unlock panicked with
// "sync: Unlock of unlocked RWMutex" (and `go vet` flags the assignment as a
// lock copy). Clear each field in place instead and leave the mutex alone.
func (m *VacuumMetrics) Reset() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.VolumesVacuumed = 0
	m.TotalSpaceReclaimed = 0
	m.TotalFilesProcessed = 0
	m.TotalGarbageCollected = 0
	m.LastVacuumTime = time.Now()
	m.AverageVacuumTime = 0
	m.AverageGarbageRatio = 0
	m.SuccessfulOperations = 0
	m.FailedOperations = 0
	m.CurrentGarbageRatio = 0
	m.VolumesPendingVacuum = 0
}
// Global metrics instance for vacuum tasks
var globalVacuumMetrics = NewVacuumMetrics()

// GetGlobalVacuumMetrics returns the global vacuum metrics instance
// shared by every vacuum task in this process.
func GetGlobalVacuumMetrics() *VacuumMetrics {
	return globalVacuumMetrics
}

View File

@@ -109,15 +109,6 @@ type MaintenanceWorkersData struct {
var defaultCapabilities []TaskType
var defaultCapabilitiesMutex sync.RWMutex
// SetDefaultCapabilities sets the default capabilities for workers
// This should be called after task registration is complete
func SetDefaultCapabilities(capabilities []TaskType) {
	defaultCapabilitiesMutex.Lock()
	defer defaultCapabilitiesMutex.Unlock()
	// Store a private copy so later mutation of the caller's slice cannot
	// change the published defaults.
	snapshot := make([]TaskType, len(capabilities))
	copy(snapshot, capabilities)
	defaultCapabilities = snapshot
}
// GetDefaultCapabilities returns the default capabilities for workers
func GetDefaultCapabilities() []TaskType {
defaultCapabilitiesMutex.RLock()
@@ -129,18 +120,6 @@ func GetDefaultCapabilities() []TaskType {
return result
}
// DefaultMaintenanceConfig returns default maintenance configuration:
// maintenance enabled, 30-minute scans, 6-hour cleanups, 7-day task
// retention, a 5-minute worker timeout, and a fresh default policy.
func DefaultMaintenanceConfig() *MaintenanceConfig {
	cfg := &MaintenanceConfig{
		Enabled:       true,
		Policy:        NewMaintenancePolicy(),
		ScanInterval:  30 * time.Minute,
		CleanInterval: 6 * time.Hour,
		WorkerTimeout: 5 * time.Minute,
	}
	cfg.TaskRetention = 7 * 24 * time.Hour // 7 days
	return cfg
}
// DefaultWorkerConfig returns default worker configuration
func DefaultWorkerConfig() *WorkerConfig {
// Get dynamic capabilities from registered task types
@@ -154,119 +133,3 @@ func DefaultWorkerConfig() *WorkerConfig {
Capabilities: capabilities,
}
}
// NewMaintenancePolicy creates a new dynamic maintenance policy with an
// empty per-task config map and the built-in global defaults.
func NewMaintenancePolicy() *MaintenancePolicy {
	settings := &GlobalMaintenanceSettings{
		DefaultMaxConcurrent:    2,
		MaintenanceEnabled:      true,
		DefaultScanInterval:     30 * time.Minute,
		DefaultTaskTimeout:      5 * time.Minute,
		DefaultRetryCount:       3,
		DefaultRetryInterval:    5 * time.Minute,
		DefaultPriorityBoostAge: 24 * time.Hour,
		GlobalConcurrentLimit:   5,
	}
	return &MaintenancePolicy{
		TaskConfigs:    make(map[TaskType]interface{}),
		GlobalSettings: settings,
	}
}

// SetTaskConfig sets the configuration for a specific task type,
// lazily creating the config map when the policy's zero value is used.
func (p *MaintenancePolicy) SetTaskConfig(taskType TaskType, config interface{}) {
	if p.TaskConfigs == nil {
		p.TaskConfigs = make(map[TaskType]interface{})
	}
	p.TaskConfigs[taskType] = config
}

// GetTaskConfig returns the configuration for a specific task type,
// or nil when the task type has no stored configuration.
func (p *MaintenancePolicy) GetTaskConfig(taskType TaskType) interface{} {
	if p.TaskConfigs == nil {
		return nil
	}
	return p.TaskConfigs[taskType]
}
// IsTaskEnabled returns whether a task type is enabled (generic helper).
// Resolution order: globally disabled maintenance wins; a missing config
// disables the task; a map config with a boolean "enabled" key decides;
// otherwise the global MaintenanceEnabled flag (true at this point) applies.
func (p *MaintenancePolicy) IsTaskEnabled(taskType TaskType) bool {
	if !p.GlobalSettings.MaintenanceEnabled {
		return false
	}
	config := p.GetTaskConfig(taskType)
	if config == nil {
		return false
	}
	// Try to get enabled field from config using type assertion
	if configMap, ok := config.(map[string]interface{}); ok {
		if enabled, exists := configMap["enabled"]; exists {
			if enabledBool, ok := enabled.(bool); ok {
				return enabledBool
			}
		}
	}
	// If we can't determine from config, default to global setting
	return p.GlobalSettings.MaintenanceEnabled
}
// GetMaxConcurrent returns the max concurrent setting for a task type.
// A map config may carry "max_concurrent" as an int or a float64 (the JSON
// number type); anything else falls back to the global default.
func (p *MaintenancePolicy) GetMaxConcurrent(taskType TaskType) int {
	config := p.GetTaskConfig(taskType)
	if config == nil {
		return p.GlobalSettings.DefaultMaxConcurrent
	}
	if configMap, ok := config.(map[string]interface{}); ok {
		switch v := configMap["max_concurrent"].(type) {
		case int:
			return v
		case float64:
			return int(v)
		}
	}
	return p.GlobalSettings.DefaultMaxConcurrent
}
// GetScanInterval returns the scan interval for a task type. A map config may
// carry "scan_interval" as a time.Duration or as a string parseable by
// time.ParseDuration; anything else falls back to the global default.
func (p *MaintenancePolicy) GetScanInterval(taskType TaskType) time.Duration {
	config := p.GetTaskConfig(taskType)
	if config == nil {
		return p.GlobalSettings.DefaultScanInterval
	}
	if configMap, ok := config.(map[string]interface{}); ok {
		switch v := configMap["scan_interval"].(type) {
		case time.Duration:
			return v
		case string:
			if duration, err := time.ParseDuration(v); err == nil {
				return duration
			}
		}
	}
	return p.GlobalSettings.DefaultScanInterval
}
// GetAllTaskTypes returns all configured task types (in map-iteration order,
// i.e. unordered). A nil config map yields an empty, non-nil slice.
func (p *MaintenancePolicy) GetAllTaskTypes() []TaskType {
	if p.TaskConfigs == nil {
		return []TaskType{}
	}
	result := make([]TaskType, 0, len(p.TaskConfigs))
	for configured := range p.TaskConfigs {
		result = append(result, configured)
	}
	return result
}

View File

@@ -53,14 +53,6 @@ type Logger interface {
// NoOpLogger is a logger that does nothing (silent).
// Useful as a default when callers do not care about log output.
type NoOpLogger struct{}

// All level methods discard their arguments.
func (l *NoOpLogger) Info(msg string, args ...interface{})    {}
func (l *NoOpLogger) Warning(msg string, args ...interface{}) {}
func (l *NoOpLogger) Error(msg string, args ...interface{})   {}
func (l *NoOpLogger) Debug(msg string, args ...interface{})   {}

// WithFields ignores the fields entirely.
func (l *NoOpLogger) WithFields(fields map[string]interface{}) Logger {
	return l // Return self since we're doing nothing anyway
}
// GlogFallbackLogger is a logger that falls back to glog
type GlogFallbackLogger struct{}
@@ -137,87 +129,3 @@ type UnifiedBaseTask struct {
currentStage string
workingDir string
}
// NewUnifiedBaseTask creates a new base task with the given ID and type.
func NewUnifiedBaseTask(id string, taskType TaskType) *UnifiedBaseTask {
	return &UnifiedBaseTask{
		id:       id,
		taskType: taskType,
	}
}

// ID returns the task ID.
func (t *UnifiedBaseTask) ID() string {
	return t.id
}

// Type returns the task type.
func (t *UnifiedBaseTask) Type() TaskType {
	return t.taskType
}

// SetProgressCallback sets the progress callback.
// NOTE(review): UnifiedBaseTask fields are accessed without synchronization
// throughout; confirm callers confine each task to a single goroutine.
func (t *UnifiedBaseTask) SetProgressCallback(callback func(float64, string)) {
	t.progressCallback = callback
}

// ReportProgress reports current progress through the callback, reusing the
// most recently set stage description.
func (t *UnifiedBaseTask) ReportProgress(progress float64) {
	if t.progressCallback != nil {
		t.progressCallback(progress, t.currentStage)
	}
}

// ReportProgressWithStage reports current progress with a specific stage
// description, also recording it as the current stage.
func (t *UnifiedBaseTask) ReportProgressWithStage(progress float64, stage string) {
	t.currentStage = stage
	if t.progressCallback != nil {
		t.progressCallback(progress, stage)
	}
}

// SetCurrentStage sets the current stage description.
func (t *UnifiedBaseTask) SetCurrentStage(stage string) {
	t.currentStage = stage
}

// GetCurrentStage returns the current stage description.
func (t *UnifiedBaseTask) GetCurrentStage() string {
	return t.currentStage
}
// Cancel marks the task as cancelled. Always returns nil.
// NOTE(review): the cancelled flag is a plain bool with no synchronization —
// racy if Cancel and IsCancelled run on different goroutines; confirm usage.
func (t *UnifiedBaseTask) Cancel() error {
	t.cancelled = true
	return nil
}

// IsCancellable returns true if the task can be cancelled (always true here).
func (t *UnifiedBaseTask) IsCancellable() bool {
	return true
}

// IsCancelled returns true if the task has been cancelled.
func (t *UnifiedBaseTask) IsCancelled() bool {
	return t.cancelled
}

// SetLogger sets the task logger.
func (t *UnifiedBaseTask) SetLogger(logger Logger) {
	t.logger = logger
}

// GetLogger returns the task logger.
func (t *UnifiedBaseTask) GetLogger() Logger {
	return t.logger
}

// SetWorkingDir sets the task working directory.
func (t *UnifiedBaseTask) SetWorkingDir(workingDir string) {
	t.workingDir = workingDir
}

// GetWorkingDir returns the task working directory.
func (t *UnifiedBaseTask) GetWorkingDir() string {
	return t.workingDir
}

View File

@@ -6,47 +6,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)
// secondsToIntervalValueUnit converts a duration in seconds into the largest
// display unit that represents it exactly: values not divisible by a whole
// minute stay in seconds, then days, hours, and finally minutes are tried.
// Zero is reported as "0 minute".
func secondsToIntervalValueUnit(totalSeconds int) (int, string) {
	const (
		minute = 60
		hour   = 3600
		day    = 24 * 3600
	)
	switch {
	case totalSeconds == 0:
		return 0, "minute"
	case totalSeconds < minute || totalSeconds%minute != 0:
		// Preserve sub-minute precision.
		return totalSeconds, "second"
	case totalSeconds%day == 0:
		return totalSeconds / day, "day"
	case totalSeconds%hour == 0:
		return totalSeconds / hour, "hour"
	default:
		return totalSeconds / minute, "minute"
	}
}
// IntervalValueUnitToSeconds converts an interval value plus unit name
// ("day", "hour", "minute", "second") into seconds. Unknown unit names are
// treated as minutes.
func IntervalValueUnitToSeconds(value int, unit string) int {
	multiplier := 60 // default: minutes
	switch unit {
	case "day":
		multiplier = 24 * 3600
	case "hour":
		multiplier = 3600
	case "minute":
		multiplier = 60
	case "second":
		multiplier = 1
	}
	return value * multiplier
}
// TaskConfig defines the interface for task configurations
// This matches the interfaces used in base package and handlers
type TaskConfig interface {

View File

@@ -90,24 +90,6 @@ func (r *TypedTaskRegistry) RegisterTypedTask(taskType TaskType, creator TypedTa
r.creators[taskType] = creator
}
// CreateTypedTask creates a new typed task instance, or ErrTaskTypeNotFound
// when no creator is registered for the type.
// NOTE(review): r.creators is read without synchronization; safe only if all
// registration finishes before lookups begin — confirm callers.
func (r *TypedTaskRegistry) CreateTypedTask(taskType TaskType) (TypedTaskInterface, error) {
	creator, ok := r.creators[taskType]
	if !ok {
		return nil, ErrTaskTypeNotFound
	}
	return creator(), nil
}

// GetSupportedTypes returns all registered typed task types (unordered).
func (r *TypedTaskRegistry) GetSupportedTypes() []TaskType {
	supported := make([]TaskType, 0, len(r.creators))
	for registered := range r.creators {
		supported = append(supported, registered)
	}
	return supported
}

// Global typed task registry
var globalTypedTaskRegistry = NewTypedTaskRegistry()
@@ -115,8 +97,3 @@ var globalTypedTaskRegistry = NewTypedTaskRegistry()
// RegisterGlobalTypedTask registers a typed task creator for the given
// task type on the process-wide registry.
func RegisterGlobalTypedTask(taskType TaskType, creator TypedTaskCreator) {
	globalTypedTaskRegistry.RegisterTypedTask(taskType, creator)
}
// GetGlobalTypedTaskRegistry returns the global typed task registry
// shared by the whole process.
func GetGlobalTypedTaskRegistry() *TypedTaskRegistry {
	return globalTypedTaskRegistry
}

View File

@@ -30,47 +30,3 @@ type BaseWorker struct {
currentTasks map[string]Task
logger Logger
}
// NewBaseWorker constructs a BaseWorker with the given identifier and
// an empty in-flight task map. Capabilities, concurrency limit, and
// logger are populated later via Configure.
func NewBaseWorker(id string) *BaseWorker {
	w := &BaseWorker{id: id}
	w.currentTasks = make(map[string]Task)
	return w
}
// Configure applies the creation config to the worker: identity,
// capabilities, and concurrency limit. When a logger factory is
// supplied, a per-worker logger named after the worker ID is created
// as well; a logger-creation failure is returned to the caller.
func (w *BaseWorker) Configure(config WorkerCreationConfig) error {
	w.id = config.ID
	w.capabilities = config.Capabilities
	w.maxConcurrent = config.MaxConcurrent

	// No factory means the worker runs without a dedicated logger.
	if config.LoggerFactory == nil {
		return nil
	}

	logger, err := config.LoggerFactory.CreateLogger(context.Background(), LoggerConfig{
		ServiceName: "worker-" + w.id,
		MinLevel:    LogLevelInfo,
	})
	if err != nil {
		return err
	}
	w.logger = logger
	return nil
}
// GetCapabilities returns the task types this worker can execute.
// Note: this is the worker's internal slice, not a copy.
func (w *BaseWorker) GetCapabilities() []TaskType {
	return w.capabilities
}
// GetStatus reports a snapshot of the worker: identity, capabilities,
// concurrency limit, and the number of tasks currently in flight. The
// status string is always "active".
func (w *BaseWorker) GetStatus() WorkerStatus {
	status := WorkerStatus{
		WorkerID:      w.id,
		Status:        "active",
		Capabilities:  w.capabilities,
		MaxConcurrent: w.maxConcurrent,
	}
	status.CurrentLoad = len(w.currentTasks)
	return status
}

View File

@@ -383,31 +383,6 @@ func (w *Worker) setReqTick(tick *time.Ticker) *time.Ticker {
return w.getReqTick()
}
// getStartTime asks the worker's command loop for the recorded start
// time and blocks until the reply arrives.
func (w *Worker) getStartTime() time.Time {
	reply := make(chan time.Time, 1)
	w.cmds <- workerCommand{action: ActionGetStartTime, data: reply}
	return <-reply
}
// getCompletedTasks fetches the completed-task counter from the
// worker's command loop, blocking until it replies.
func (w *Worker) getCompletedTasks() int {
	reply := make(chan int, 1)
	w.cmds <- workerCommand{action: ActionGetCompletedTasks, data: reply}
	return <-reply
}
// getFailedTasks fetches the failed-task counter from the worker's
// command loop, blocking until it replies.
func (w *Worker) getFailedTasks() int {
	reply := make(chan int, 1)
	w.cmds <- workerCommand{action: ActionGetFailedTasks, data: reply}
	return <-reply
}
// getTaskLoggerConfig returns the task logger configuration with worker's log directory
func (w *Worker) getTaskLoggerConfig() tasks.TaskLoggerConfig {
config := tasks.DefaultTaskLoggerConfig()
@@ -543,27 +518,6 @@ func (w *Worker) handleStop(cmd workerCommand) {
cmd.resp <- nil
}
// RegisterTask registers a task factory for the given task type on the
// worker's task registry.
func (w *Worker) RegisterTask(taskType types.TaskType, factory types.TaskFactory) {
	w.registry.Register(taskType, factory)
}
// GetCapabilities returns the worker capabilities from its config.
// NOTE(review): this reads w.config directly rather than going through
// the command loop used by GetStatus — confirm the config is not
// mutated after the worker starts (see SetCapabilities).
func (w *Worker) GetCapabilities() []types.TaskType {
	return w.config.Capabilities
}
// GetStatus requests the current worker status from the command loop
// and blocks until the snapshot is delivered.
func (w *Worker) GetStatus() types.WorkerStatus {
	reply := make(statusResponse, 1)
	// resp is intentionally left at its zero value; the status comes
	// back on the data channel.
	w.cmds <- workerCommand{action: ActionGetStatus, data: reply}
	return <-reply
}
// HandleTask handles a task execution
func (w *Worker) HandleTask(task *types.TaskInput) error {
glog.V(1).Infof("Worker %s received task %s (type: %s, volume: %d)",
@@ -579,26 +533,6 @@ func (w *Worker) HandleTask(task *types.TaskInput) error {
return nil
}
// SetCapabilities sets the worker capabilities.
// NOTE(review): mutates w.config directly, bypassing the command loop
// the worker uses elsewhere — unsafe if called concurrently with the
// running worker; confirm callers only use this during setup.
func (w *Worker) SetCapabilities(capabilities []types.TaskType) {
	w.config.Capabilities = capabilities
}
// SetMaxConcurrent sets the maximum concurrent tasks.
// NOTE(review): mutates w.config directly, bypassing the command loop —
// confirm this is only called before the worker starts.
func (w *Worker) SetMaxConcurrent(max int) {
	w.config.MaxConcurrent = max
}
// SetHeartbeatInterval sets the heartbeat interval.
// NOTE(review): mutates w.config directly, bypassing the command loop —
// confirm this is only called before the worker starts.
func (w *Worker) SetHeartbeatInterval(interval time.Duration) {
	w.config.HeartbeatInterval = interval
}
// SetTaskRequestInterval sets the task request interval.
// NOTE(review): mutates w.config directly, bypassing the command loop —
// confirm this is only called before the worker starts.
func (w *Worker) SetTaskRequestInterval(interval time.Duration) {
	w.config.TaskRequestInterval = interval
}
// SetAdminClient sets the admin client
func (w *Worker) SetAdminClient(client AdminClient) {
w.cmds <- workerCommand{
@@ -828,11 +762,6 @@ func (w *Worker) requestTasks() {
}
}
// GetTaskRegistry returns the worker's task registry.
func (w *Worker) GetTaskRegistry() *tasks.TaskRegistry {
	return w.registry
}
// connectionMonitorLoop monitors connection status
func (w *Worker) connectionMonitorLoop() {
ticker := time.NewTicker(30 * time.Second) // Check every 30 seconds
@@ -867,34 +796,6 @@ func (w *Worker) connectionMonitorLoop() {
}
}
// GetConfig returns the worker configuration.
// Note: this is a pointer to the live config, not a copy — callers can
// mutate the worker's configuration through it.
func (w *Worker) GetConfig() *types.WorkerConfig {
	return w.config
}
// GetPerformanceMetrics returns a snapshot of the worker's performance
// counters: tasks completed/failed, uptime since start, and success
// rate as a percentage (0 when no tasks have run yet).
//
// Each counter getter is a blocking round trip through the worker's
// command loop, so the counters are read exactly once and reused. The
// previous implementation called getCompletedTasks three times and
// getFailedTasks twice, paying extra round trips and risking an
// internally inconsistent snapshot if the counters advanced between
// calls.
func (w *Worker) GetPerformanceMetrics() *types.WorkerPerformance {
	completed := w.getCompletedTasks()
	failed := w.getFailedTasks()
	uptime := time.Since(w.getStartTime())

	var successRate float64
	if total := completed + failed; total > 0 {
		successRate = float64(completed) / float64(total) * 100
	}
	return &types.WorkerPerformance{
		TasksCompleted:  completed,
		TasksFailed:     failed,
		AverageTaskTime: 0, // not tracked yet
		Uptime:          uptime,
		SuccessRate:     successRate,
	}
}
// GetAdmin returns the worker's admin client via the internal
// command-loop accessor.
func (w *Worker) GetAdmin() AdminClient {
	return w.getAdmin()
}
// messageProcessingLoop processes incoming admin messages
func (w *Worker) messageProcessingLoop() {
glog.Infof("MESSAGE LOOP STARTED: Worker %s message processing loop started", w.id)