chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
This commit is contained in:
Chris Lu
2026-04-03 16:04:27 -07:00
committed by GitHub
parent 8fad85aed7
commit 995dfc4d5d
264 changed files with 62 additions and 46027 deletions

View File

@@ -74,26 +74,6 @@ type GenericUIProvider struct {
taskDef *TaskDefinition
}
// GetTaskType returns the task type from the underlying task definition.
func (ui *GenericUIProvider) GetTaskType() types.TaskType {
	return ui.taskDef.Type
}
// GetDisplayName returns the human-readable name from the underlying
// task definition.
func (ui *GenericUIProvider) GetDisplayName() string {
	return ui.taskDef.DisplayName
}
// GetDescription returns the task definition's description of what this
// task does.
func (ui *GenericUIProvider) GetDescription() string {
	return ui.taskDef.Description
}
// GetIcon returns the icon CSS class for this task type, as configured in
// the task definition.
func (ui *GenericUIProvider) GetIcon() string {
	return ui.taskDef.Icon
}
// GetCurrentConfig returns current config as TaskConfig
func (ui *GenericUIProvider) GetCurrentConfig() types.TaskConfig {
return ui.taskDef.Config

View File

@@ -2,8 +2,6 @@ package base
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/config"
@@ -75,108 +73,6 @@ func (c *BaseConfig) Validate() error {
return nil
}
// StructToMap converts any struct to a map using reflection.
//
// Keys come from each field's `json` tag with options such as ",omitempty"
// stripped; fields without a json tag, or tagged "-", are skipped, as are
// unexported fields. Anonymous embedded structs are flattened into the
// result. Anonymous embedded *pointers* to structs are now flattened too
// when non-nil (previously they were silently dropped); a nil embedded
// pointer contributes nothing, matching encoding/json behavior.
//
// A nil pointer or a non-struct value yields an empty map.
func StructToMap(obj interface{}) map[string]interface{} {
	result := make(map[string]interface{})
	val := reflect.ValueOf(obj)
	// Handle pointer to struct (a nil pointer dereferences to an Invalid
	// Value, fails the Struct check below, and returns an empty map).
	if val.Kind() == reflect.Ptr {
		val = val.Elem()
	}
	if val.Kind() != reflect.Struct {
		return result
	}
	typ := val.Type()
	for i := 0; i < val.NumField(); i++ {
		field := val.Field(i)
		fieldType := typ.Field(i)
		// Skip unexported fields
		if !field.CanInterface() {
			continue
		}
		// Handle embedded structs recursively (before JSON tag check).
		// Embedded struct pointers recurse through StructToMap, which
		// dereferences them; a nil embedded pointer falls through to the
		// tag check and is skipped (anonymous fields are normally untagged).
		if fieldType.Anonymous &&
			(field.Kind() == reflect.Struct ||
				(field.Kind() == reflect.Ptr && field.Elem().Kind() == reflect.Struct)) {
			for k, v := range StructToMap(field.Interface()) {
				result[k] = v
			}
			continue
		}
		// Get JSON tag name
		jsonTag := fieldType.Tag.Get("json")
		if jsonTag == "" || jsonTag == "-" {
			continue
		}
		// Remove options like ",omitempty"
		if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 {
			jsonTag = jsonTag[:commaIdx]
		}
		result[jsonTag] = field.Interface()
	}
	return result
}
// MapToStruct loads data from map into struct using reflection.
//
// obj must be a non-nil pointer to a struct. For each settable field, the
// json tag (with options such as ",omitempty" removed) selects the key in
// data; untagged fields and fields tagged "-" are ignored. Anonymous
// embedded structs are filled recursively from the same map. Keys absent
// from data leave the corresponding fields untouched.
func MapToStruct(data map[string]interface{}, obj interface{}) error {
	ptr := reflect.ValueOf(obj)
	// Must be pointer to struct
	if ptr.Kind() != reflect.Ptr || ptr.Elem().Kind() != reflect.Struct {
		return fmt.Errorf("obj must be pointer to struct")
	}
	structVal := ptr.Elem()
	structType := structVal.Type()
	for i := 0; i < structVal.NumField(); i++ {
		fieldVal := structVal.Field(i)
		fieldInfo := structType.Field(i)
		// Unexported fields cannot be set; skip them.
		if !fieldVal.CanSet() {
			continue
		}
		// Recurse into anonymous embedded structs before consulting tags.
		if fieldInfo.Anonymous && fieldVal.Kind() == reflect.Struct {
			if err := MapToStruct(data, fieldVal.Addr().Interface()); err != nil {
				return err
			}
			continue
		}
		name := fieldInfo.Tag.Get("json")
		if name == "" || name == "-" {
			continue
		}
		// Drop tag options such as ",omitempty".
		name, _, _ = strings.Cut(name, ",")
		value, exists := data[name]
		if !exists {
			continue
		}
		if err := setFieldValue(fieldVal, value); err != nil {
			return fmt.Errorf("failed to set field %s: %v", name, err)
		}
	}
	return nil
}
// ToMap converts config to map using reflection
// ToTaskPolicy converts BaseConfig to protobuf (partial implementation)
// Note: Concrete implementations should override this to include task-specific config
@@ -207,66 +103,3 @@ func (c *BaseConfig) ApplySchemaDefaults(schema *config.Schema) error {
// Use reflection-based approach for BaseConfig since it needs to handle embedded structs
return schema.ApplyDefaultsToProtobuf(c)
}
// setFieldValue sets a field value with type conversion.
//
// A nil value is a no-op. Values whose type is directly assignable to the
// field are stored as-is; otherwise a small set of conversions is applied
// for bool, signed integer, unsigned integer, float, and string fields.
// JSON decoding yields float64 for numbers, so float64 -> integer
// conversions truncate toward zero, preserving the previous behavior.
// Unsigned fields were previously rejected with "unsupported field type";
// they are now supported, with negative inputs reported as errors.
// Unsupported combinations return a descriptive error.
func setFieldValue(field reflect.Value, value interface{}) error {
	if value == nil {
		return nil
	}
	valueVal := reflect.ValueOf(value)
	fieldType := field.Type()
	valueType := valueVal.Type()
	// Direct assignment if types match
	if valueType.AssignableTo(fieldType) {
		field.Set(valueVal)
		return nil
	}
	// Type conversion for common cases
	switch fieldType.Kind() {
	case reflect.Bool:
		if b, ok := value.(bool); ok {
			field.SetBool(b)
		} else {
			return fmt.Errorf("cannot convert %T to bool", value)
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch v := value.(type) {
		case int:
			field.SetInt(int64(v))
		case int32:
			field.SetInt(int64(v))
		case int64:
			field.SetInt(v)
		case float64:
			field.SetInt(int64(v))
		default:
			return fmt.Errorf("cannot convert %T to int", value)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		// Unsigned fields: accept non-negative signed values and float64
		// (the JSON number type) in addition to native unsigned values.
		switch v := value.(type) {
		case uint:
			field.SetUint(uint64(v))
		case uint32:
			field.SetUint(uint64(v))
		case uint64:
			field.SetUint(v)
		case int:
			if v < 0 {
				return fmt.Errorf("cannot convert negative %T to uint", value)
			}
			field.SetUint(uint64(v))
		case int64:
			if v < 0 {
				return fmt.Errorf("cannot convert negative %T to uint", value)
			}
			field.SetUint(uint64(v))
		case float64:
			if v < 0 {
				return fmt.Errorf("cannot convert negative %T to uint", value)
			}
			field.SetUint(uint64(v))
		default:
			return fmt.Errorf("cannot convert %T to uint", value)
		}
	case reflect.Float32, reflect.Float64:
		switch v := value.(type) {
		case float32:
			field.SetFloat(float64(v))
		case float64:
			field.SetFloat(v)
		case int:
			field.SetFloat(float64(v))
		case int64:
			field.SetFloat(float64(v))
		default:
			return fmt.Errorf("cannot convert %T to float", value)
		}
	case reflect.String:
		if s, ok := value.(string); ok {
			field.SetString(s)
		} else {
			return fmt.Errorf("cannot convert %T to string", value)
		}
	default:
		return fmt.Errorf("unsupported field type %s", fieldType.Kind())
	}
	return nil
}

View File

@@ -1,338 +0,0 @@
package base
import (
"reflect"
"testing"
)
// Test structs that mirror the actual configuration structure

// TestBaseConfig mirrors the shared base fields that task configs embed.
type TestBaseConfig struct {
	Enabled             bool `json:"enabled"`
	ScanIntervalSeconds int  `json:"scan_interval_seconds"`
	MaxConcurrent       int  `json:"max_concurrent"`
}

// TestTaskConfig embeds TestBaseConfig to exercise flattening of anonymous
// embedded struct fields.
type TestTaskConfig struct {
	TestBaseConfig
	TaskSpecificField    float64 `json:"task_specific_field"`
	AnotherSpecificField string  `json:"another_specific_field"`
}

// TestNestedConfig adds a named (non-anonymous) nested struct, which should
// be kept as a single map value rather than flattened.
type TestNestedConfig struct {
	TestBaseConfig
	NestedStruct struct {
		NestedField string `json:"nested_field"`
	} `json:"nested_struct"`
	TaskField int `json:"task_field"`
}
// TestStructToMap_WithEmbeddedStruct verifies that StructToMap flattens an
// anonymous embedded struct's fields alongside the outer struct's own
// fields, keyed by json tag.
func TestStructToMap_WithEmbeddedStruct(t *testing.T) {
	// Test case 1: Basic embedded struct
	config := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "test_value",
	}
	result := StructToMap(config)
	// Verify all fields are present
	expectedFields := map[string]interface{}{
		"enabled":                true,
		"scan_interval_seconds":  1800,
		"max_concurrent":         3,
		"task_specific_field":    0.25,
		"another_specific_field": "test_value",
	}
	if len(result) != len(expectedFields) {
		t.Errorf("Expected %d fields, got %d. Result: %+v", len(expectedFields), len(result), result)
	}
	for key, expectedValue := range expectedFields {
		if actualValue, exists := result[key]; !exists {
			t.Errorf("Missing field: %s", key)
		} else if !reflect.DeepEqual(actualValue, expectedValue) {
			t.Errorf("Field %s: expected %v (%T), got %v (%T)", key, expectedValue, expectedValue, actualValue, actualValue)
		}
	}
}
// TestStructToMap_WithNestedStruct verifies that embedded struct fields are
// flattened while a named nested struct field is kept as a single value
// under its own json key, not flattened.
func TestStructToMap_WithNestedStruct(t *testing.T) {
	config := &TestNestedConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             false,
			ScanIntervalSeconds: 3600,
			MaxConcurrent:       1,
		},
		NestedStruct: struct {
			NestedField string `json:"nested_field"`
		}{
			NestedField: "nested_value",
		},
		TaskField: 42,
	}
	result := StructToMap(config)
	// Verify embedded struct fields are included
	if enabled, exists := result["enabled"]; !exists || enabled != false {
		t.Errorf("Expected enabled=false from embedded struct, got %v", enabled)
	}
	if scanInterval, exists := result["scan_interval_seconds"]; !exists || scanInterval != 3600 {
		t.Errorf("Expected scan_interval_seconds=3600 from embedded struct, got %v", scanInterval)
	}
	if maxConcurrent, exists := result["max_concurrent"]; !exists || maxConcurrent != 1 {
		t.Errorf("Expected max_concurrent=1 from embedded struct, got %v", maxConcurrent)
	}
	// Verify regular fields are included
	if taskField, exists := result["task_field"]; !exists || taskField != 42 {
		t.Errorf("Expected task_field=42, got %v", taskField)
	}
	// Verify nested struct is included as a whole
	if nestedStruct, exists := result["nested_struct"]; !exists {
		t.Errorf("Missing nested_struct field")
	} else {
		// The nested struct should be included as-is, not flattened
		if nested, ok := nestedStruct.(struct {
			NestedField string `json:"nested_field"`
		}); !ok || nested.NestedField != "nested_value" {
			t.Errorf("Expected nested_struct with NestedField='nested_value', got %v", nestedStruct)
		}
	}
}
// TestMapToStruct_WithEmbeddedStruct verifies that MapToStruct writes map
// values into both the embedded struct's fields and the outer struct's own
// fields, matching keys by json tag.
func TestMapToStruct_WithEmbeddedStruct(t *testing.T) {
	// Test data with all fields including embedded struct fields
	data := map[string]interface{}{
		"enabled":                true,
		"scan_interval_seconds":  2400,
		"max_concurrent":         5,
		"task_specific_field":    0.15,
		"another_specific_field": "updated_value",
	}
	config := &TestTaskConfig{}
	err := MapToStruct(data, config)
	if err != nil {
		t.Fatalf("MapToStruct failed: %v", err)
	}
	// Verify embedded struct fields were set
	if config.Enabled != true {
		t.Errorf("Expected Enabled=true, got %v", config.Enabled)
	}
	if config.ScanIntervalSeconds != 2400 {
		t.Errorf("Expected ScanIntervalSeconds=2400, got %v", config.ScanIntervalSeconds)
	}
	if config.MaxConcurrent != 5 {
		t.Errorf("Expected MaxConcurrent=5, got %v", config.MaxConcurrent)
	}
	// Verify regular fields were set
	if config.TaskSpecificField != 0.15 {
		t.Errorf("Expected TaskSpecificField=0.15, got %v", config.TaskSpecificField)
	}
	if config.AnotherSpecificField != "updated_value" {
		t.Errorf("Expected AnotherSpecificField='updated_value', got %v", config.AnotherSpecificField)
	}
}
// TestMapToStruct_PartialData verifies that keys missing from the input map
// leave the corresponding struct fields unchanged — only the fields present
// in the map are overwritten.
func TestMapToStruct_PartialData(t *testing.T) {
	// Test with only some fields present (simulating form data)
	data := map[string]interface{}{
		"enabled":             false,
		"max_concurrent":      2,
		"task_specific_field": 0.30,
	}
	// Start with some initial values
	config := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       1,
		},
		TaskSpecificField:    0.20,
		AnotherSpecificField: "initial_value",
	}
	err := MapToStruct(data, config)
	if err != nil {
		t.Fatalf("MapToStruct failed: %v", err)
	}
	// Verify updated fields
	if config.Enabled != false {
		t.Errorf("Expected Enabled=false (updated), got %v", config.Enabled)
	}
	if config.MaxConcurrent != 2 {
		t.Errorf("Expected MaxConcurrent=2 (updated), got %v", config.MaxConcurrent)
	}
	if config.TaskSpecificField != 0.30 {
		t.Errorf("Expected TaskSpecificField=0.30 (updated), got %v", config.TaskSpecificField)
	}
	// Verify unchanged fields remain the same
	if config.ScanIntervalSeconds != 1800 {
		t.Errorf("Expected ScanIntervalSeconds=1800 (unchanged), got %v", config.ScanIntervalSeconds)
	}
	if config.AnotherSpecificField != "initial_value" {
		t.Errorf("Expected AnotherSpecificField='initial_value' (unchanged), got %v", config.AnotherSpecificField)
	}
}
// TestRoundTripSerialization verifies that StructToMap followed by
// MapToStruct reproduces the original struct exactly (struct -> map ->
// struct is lossless for tagged fields).
func TestRoundTripSerialization(t *testing.T) {
	// Test complete round-trip: struct -> map -> struct
	original := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 3600,
			MaxConcurrent:       4,
		},
		TaskSpecificField:    0.18,
		AnotherSpecificField: "round_trip_test",
	}
	// Convert to map
	dataMap := StructToMap(original)
	// Convert back to struct
	roundTrip := &TestTaskConfig{}
	err := MapToStruct(dataMap, roundTrip)
	if err != nil {
		t.Fatalf("Round-trip MapToStruct failed: %v", err)
	}
	// Verify all fields match
	if !reflect.DeepEqual(original.TestBaseConfig, roundTrip.TestBaseConfig) {
		t.Errorf("BaseConfig mismatch:\nOriginal: %+v\nRound-trip: %+v", original.TestBaseConfig, roundTrip.TestBaseConfig)
	}
	if original.TaskSpecificField != roundTrip.TaskSpecificField {
		t.Errorf("TaskSpecificField mismatch: %v != %v", original.TaskSpecificField, roundTrip.TaskSpecificField)
	}
	if original.AnotherSpecificField != roundTrip.AnotherSpecificField {
		t.Errorf("AnotherSpecificField mismatch: %v != %v", original.AnotherSpecificField, roundTrip.AnotherSpecificField)
	}
}
func TestStructToMap_EmptyStruct(t *testing.T) {
config := &TestTaskConfig{}
result := StructToMap(config)
// Should still include all fields, even with zero values
expectedFields := []string{"enabled", "scan_interval_seconds", "max_concurrent", "task_specific_field", "another_specific_field"}
for _, field := range expectedFields {
if _, exists := result[field]; !exists {
t.Errorf("Missing field: %s", field)
}
}
}
func TestStructToMap_NilPointer(t *testing.T) {
var config *TestTaskConfig = nil
result := StructToMap(config)
if len(result) != 0 {
t.Errorf("Expected empty map for nil pointer, got %+v", result)
}
}
// TestMapToStruct_InvalidInput checks that a map value of the wrong type for
// a field surfaces an error instead of being silently ignored.
func TestMapToStruct_InvalidInput(t *testing.T) {
	badData := map[string]interface{}{
		"enabled": "not_a_bool", // Wrong type
	}
	target := &TestTaskConfig{}
	if err := MapToStruct(badData, target); err == nil {
		t.Errorf("Expected error for invalid input type, but got none")
	}
}
// TestMapToStruct_NonPointer checks that passing a struct by value (rather
// than a pointer to it) is rejected with an error.
func TestMapToStruct_NonPointer(t *testing.T) {
	input := map[string]interface{}{
		"enabled": true,
	}
	byValue := TestTaskConfig{} // Not a pointer
	if err := MapToStruct(input, byValue); err == nil {
		t.Errorf("Expected error for non-pointer input, but got none")
	}
}
// Benchmark tests to ensure performance is reasonable

// BenchmarkStructToMap measures struct-to-map conversion throughput on a
// representative config value.
func BenchmarkStructToMap(b *testing.B) {
	sample := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "benchmark_test",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = StructToMap(sample)
	}
}
// BenchmarkMapToStruct measures map-to-struct population throughput on a
// representative input map.
func BenchmarkMapToStruct(b *testing.B) {
	input := map[string]interface{}{
		"enabled":                true,
		"scan_interval_seconds":  1800,
		"max_concurrent":         3,
		"task_specific_field":    0.25,
		"another_specific_field": "benchmark_test",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		target := &TestTaskConfig{}
		_ = MapToStruct(input, target)
	}
}
// BenchmarkRoundTrip measures a full struct -> map -> struct cycle per
// iteration.
func BenchmarkRoundTrip(b *testing.B) {
	source := &TestTaskConfig{
		TestBaseConfig: TestBaseConfig{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "benchmark_test",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		asMap := StructToMap(source)
		rebuilt := &TestTaskConfig{}
		_ = MapToStruct(asMap, rebuilt)
	}
}