chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
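
A minimal, self-contained sketch of the pattern, using illustrative names
rather than the actual SeaweedFS types: a package-level store shared by every
test in the package has to be wiped explicitly, because Go keeps package
state alive across tests in the same binary.

package credstore // stand-in for the real credential package

import (
	"sync"
	"testing"
)

// memoryStore mimics the singleton described above: in the real code it is
// registered once via init() and mutated by any test that creates identities.
type memoryStore struct {
	mu         sync.Mutex
	identities map[string]struct{}
}

func (s *memoryStore) LookupAnonymous() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.identities["anonymous"]
	return ok
}

// Reset drops whatever earlier tests left behind.
func (s *memoryStore) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.identities = map[string]struct{}{}
}

var sharedStore = &memoryStore{identities: map[string]struct{}{}}

func TestEmptyConfigWithFallbackKey(t *testing.T) {
	sharedStore.Reset() // the fix: start from a clean store regardless of test order
	if sharedStore.LookupAnonymous() {
		t.Fatal("anonymous identity leaked from an earlier test")
	}
}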

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
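
A minimal, self-contained sketch of the done-channel idea, with illustrative
names only (the real streamSession carries more state, and the actual change
also closes the outgoing channel after done; here the outgoing channel is
simply left open and abandoned, so a concurrent sender can never hit a closed
channel):

package worker // stand-in for the real plugin worker package

import (
	"errors"
	"sync"
)

type message struct{ payload []byte }

// streamSession owns an outgoing channel drained by the stream writer
// goroutine and a done channel that signals shutdown to all senders.
type streamSession struct {
	closeOnce sync.Once
	done      chan struct{}
	outgoing  chan *message
}

func newStreamSession() *streamSession {
	return &streamSession{
		done:     make(chan struct{}),
		outgoing: make(chan *message, 16),
	}
}

// close signals shutdown exactly once; senders observe it via done.
func (s *streamSession) close() {
	s.closeOnce.Do(func() { close(s.done) })
}

// sendToWorker selects on done, so sending to a shut-down session returns an
// error instead of panicking with "send on closed channel".
func (s *streamSession) sendToWorker(m *message) error {
	select {
	case <-s.done:
		return errors.New("worker session closed")
	case s.outgoing <- m:
		return nil
	}
}
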
Author: Chris Lu
Date: 2026-04-03 16:04:27 -07:00 (committed via GitHub)
Parent: 8fad85aed7
Commit: 995dfc4d5d
264 changed files with 62 additions and 46027 deletions


@@ -1,138 +0,0 @@
package balance

import (
	"sync"
	"time"
)

// BalanceMetrics contains balance-specific monitoring data
type BalanceMetrics struct {
	// Execution metrics
	VolumesBalanced      int64     `json:"volumes_balanced"`
	TotalDataTransferred int64     `json:"total_data_transferred"`
	AverageImbalance     float64   `json:"average_imbalance"`
	LastBalanceTime      time.Time `json:"last_balance_time"`

	// Performance metrics
	AverageTransferSpeed float64 `json:"average_transfer_speed_mbps"`
	TotalExecutionTime   int64   `json:"total_execution_time_seconds"`
	SuccessfulOperations int64   `json:"successful_operations"`
	FailedOperations     int64   `json:"failed_operations"`

	// Current task metrics
	CurrentImbalanceScore float64 `json:"current_imbalance_score"`
	PlannedDestinations   int     `json:"planned_destinations"`

	mutex sync.RWMutex
}

// NewBalanceMetrics creates a new balance metrics instance
func NewBalanceMetrics() *BalanceMetrics {
	return &BalanceMetrics{
		LastBalanceTime: time.Now(),
	}
}

// RecordVolumeBalanced records a successful volume balance operation
func (m *BalanceMetrics) RecordVolumeBalanced(volumeSize int64, transferTime time.Duration) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	m.VolumesBalanced++
	m.TotalDataTransferred += volumeSize
	m.SuccessfulOperations++
	m.LastBalanceTime = time.Now()
	m.TotalExecutionTime += int64(transferTime.Seconds())

	// Calculate average transfer speed (MB/s)
	if transferTime > 0 {
		speedMBps := float64(volumeSize) / (1024 * 1024) / transferTime.Seconds()
		if m.AverageTransferSpeed == 0 {
			m.AverageTransferSpeed = speedMBps
		} else {
			// Exponential moving average
			m.AverageTransferSpeed = 0.8*m.AverageTransferSpeed + 0.2*speedMBps
		}
	}
}

// RecordFailure records a failed balance operation
func (m *BalanceMetrics) RecordFailure() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.FailedOperations++
}

// UpdateImbalanceScore updates the current cluster imbalance score
func (m *BalanceMetrics) UpdateImbalanceScore(score float64) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	m.CurrentImbalanceScore = score

	// Update average imbalance with exponential moving average
	if m.AverageImbalance == 0 {
		m.AverageImbalance = score
	} else {
		m.AverageImbalance = 0.9*m.AverageImbalance + 0.1*score
	}
}

// SetPlannedDestinations sets the number of planned destinations
func (m *BalanceMetrics) SetPlannedDestinations(count int) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.PlannedDestinations = count
}

// GetMetrics returns a copy of the current metrics (without the mutex)
func (m *BalanceMetrics) GetMetrics() BalanceMetrics {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	// Create a copy without the mutex to avoid copying lock value
	return BalanceMetrics{
		VolumesBalanced:       m.VolumesBalanced,
		TotalDataTransferred:  m.TotalDataTransferred,
		AverageImbalance:      m.AverageImbalance,
		LastBalanceTime:       m.LastBalanceTime,
		AverageTransferSpeed:  m.AverageTransferSpeed,
		TotalExecutionTime:    m.TotalExecutionTime,
		SuccessfulOperations:  m.SuccessfulOperations,
		FailedOperations:      m.FailedOperations,
		CurrentImbalanceScore: m.CurrentImbalanceScore,
		PlannedDestinations:   m.PlannedDestinations,
	}
}

// GetSuccessRate returns the success rate as a percentage
func (m *BalanceMetrics) GetSuccessRate() float64 {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	total := m.SuccessfulOperations + m.FailedOperations
	if total == 0 {
		return 100.0
	}
	return float64(m.SuccessfulOperations) / float64(total) * 100.0
}

// Reset resets all metrics to zero
func (m *BalanceMetrics) Reset() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	*m = BalanceMetrics{
		LastBalanceTime: time.Now(),
	}
}

// Global metrics instance for balance tasks
var globalBalanceMetrics = NewBalanceMetrics()

// GetGlobalBalanceMetrics returns the global balance metrics instance
func GetGlobalBalanceMetrics() *BalanceMetrics {
	return globalBalanceMetrics
}