chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
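
A minimal sketch of the singleton-reset pattern described above (the store
shape, field names, and Reset() body here are illustrative assumptions, not
the actual SeaweedFS code):

package credential

import "sync"

// MemoryStore stands in for the singleton credential backend. Because it
// is registered once via init(), every test in the binary shares it.
type MemoryStore struct {
	sync.Mutex
	identities map[string]bool // e.g. "anonymous" -> true
}

var memStore = &MemoryStore{identities: map[string]bool{}}

// Reset drops any identities left behind by earlier tests.
func (s *MemoryStore) Reset() {
	s.Lock()
	defer s.Unlock()
	s.identities = map[string]bool{}
}

// LookupAnonymous reports whether an anonymous identity exists.
func (s *MemoryStore) LookupAnonymous() bool {
	s.Lock()
	defer s.Unlock()
	return s.identities["anonymous"]
}

A test that depends on a clean store then starts with memStore.Reset(), as
the fix does, instead of assuming the shared singleton is empty.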

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
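
A sketch of the done-channel guard (type and field names follow the commit
message; the rest is assumed, and for simplicity this sketch never closes
outgoing at all, which is the easiest way to keep the send panic-free):

package worker

import "sync"

type message struct{ payload []byte }

type streamSession struct {
	closeOnce sync.Once
	done      chan struct{} // closed first on shutdown
	outgoing  chan *message
}

func newStreamSession() *streamSession {
	return &streamSession{
		done:     make(chan struct{}),
		outgoing: make(chan *message, 16),
	}
}

// close signals shutdown. Senders parked in sendToWorker's select wake up
// on the done case instead of racing a send against a channel close.
func (s *streamSession) close() {
	s.closeOnce.Do(func() { close(s.done) })
}

// sendToWorker delivers msg unless the session has been closed. Once done
// is closed, the first case is always ready, so the send is skipped and
// the "send on closed channel" panic cannot occur.
func (s *streamSession) sendToWorker(msg *message) bool {
	select {
	case <-s.done:
		return false // session closed; drop the message
	case s.outgoing <- msg:
		return true
	}
}

Closing a dedicated done channel is the standard Go idiom for signaling
shutdown to concurrent senders without closing the data channel under them.
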
Author:    Chris Lu (2026-04-03 16:04:27 -07:00)
Committer: GitHub
Parent:    8fad85aed7
Commit:    995dfc4d5d

264 changed files with 62 additions and 46027 deletions


@@ -1,215 +0,0 @@
package topology

import (
	"sync"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func TestCapacityReservations_BasicOperations(t *testing.T) {
	cr := newCapacityReservations()
	diskType := types.HardDriveType

	// Test initial state
	if count := cr.getReservedCount(diskType); count != 0 {
		t.Errorf("Expected 0 reserved count initially, got %d", count)
	}

	// Test add reservation
	reservationId := cr.addReservation(diskType, 5)
	if reservationId == "" {
		t.Error("Expected non-empty reservation ID")
	}
	if count := cr.getReservedCount(diskType); count != 5 {
		t.Errorf("Expected 5 reserved count, got %d", count)
	}

	// Test multiple reservations
	cr.addReservation(diskType, 3)
	if count := cr.getReservedCount(diskType); count != 8 {
		t.Errorf("Expected 8 reserved count after second reservation, got %d", count)
	}

	// Test remove reservation
	success := cr.removeReservation(reservationId)
	if !success {
		t.Error("Expected successful removal of existing reservation")
	}
	if count := cr.getReservedCount(diskType); count != 3 {
		t.Errorf("Expected 3 reserved count after removal, got %d", count)
	}

	// Test remove non-existent reservation
	success = cr.removeReservation("non-existent-id")
	if success {
		t.Error("Expected failure when removing non-existent reservation")
	}
}

func TestCapacityReservations_ExpiredCleaning(t *testing.T) {
	cr := newCapacityReservations()
	diskType := types.HardDriveType

	// Add reservations and manipulate their creation time
	reservationId1 := cr.addReservation(diskType, 3)
	reservationId2 := cr.addReservation(diskType, 2)

	// Make one reservation "old"
	cr.Lock()
	if reservation, exists := cr.reservations[reservationId1]; exists {
		reservation.createdAt = time.Now().Add(-10 * time.Minute) // 10 minutes ago
	}
	cr.Unlock()

	// Clean expired reservations (5 minute expiration)
	cr.cleanExpiredReservations(5 * time.Minute)

	// Only the non-expired reservation should remain
	if count := cr.getReservedCount(diskType); count != 2 {
		t.Errorf("Expected 2 reserved count after cleaning, got %d", count)
	}

	// Verify the right reservation was kept
	if !cr.removeReservation(reservationId2) {
		t.Error("Expected recent reservation to still exist")
	}
	if cr.removeReservation(reservationId1) {
		t.Error("Expected old reservation to be cleaned up")
	}
}

func TestCapacityReservations_DifferentDiskTypes(t *testing.T) {
	cr := newCapacityReservations()

	// Add reservations for different disk types
	cr.addReservation(types.HardDriveType, 5)
	cr.addReservation(types.SsdType, 3)

	// Check counts are separate
	if count := cr.getReservedCount(types.HardDriveType); count != 5 {
		t.Errorf("Expected 5 HDD reserved count, got %d", count)
	}
	if count := cr.getReservedCount(types.SsdType); count != 3 {
		t.Errorf("Expected 3 SSD reserved count, got %d", count)
	}
}

func TestNodeImpl_ReservationMethods(t *testing.T) {
	// Create a test data node
	dn := NewDataNode("test-node")
	diskType := types.HardDriveType

	// Set up some capacity
	diskUsage := dn.diskUsages.getOrCreateDisk(diskType)
	diskUsage.maxVolumeCount = 10
	diskUsage.volumeCount = 5 // 5 volumes free initially

	option := &VolumeGrowOption{DiskType: diskType}

	// Test available space calculation
	available := dn.AvailableSpaceFor(option)
	if available != 5 {
		t.Errorf("Expected 5 available slots, got %d", available)
	}
	availableForReservation := dn.AvailableSpaceForReservation(option)
	if availableForReservation != 5 {
		t.Errorf("Expected 5 available slots for reservation, got %d", availableForReservation)
	}

	// Test successful reservation
	reservationId, success := dn.TryReserveCapacity(diskType, 3)
	if !success {
		t.Error("Expected successful reservation")
	}
	if reservationId == "" {
		t.Error("Expected non-empty reservation ID")
	}

	// Available space should be reduced by reservations
	availableForReservation = dn.AvailableSpaceForReservation(option)
	if availableForReservation != 2 {
		t.Errorf("Expected 2 available slots after reservation, got %d", availableForReservation)
	}

	// Base available space should remain unchanged
	available = dn.AvailableSpaceFor(option)
	if available != 5 {
		t.Errorf("Expected base available to remain 5, got %d", available)
	}

	// Test reservation failure when insufficient capacity
	_, success = dn.TryReserveCapacity(diskType, 3)
	if success {
		t.Error("Expected reservation failure due to insufficient capacity")
	}

	// Test release reservation
	dn.ReleaseReservedCapacity(reservationId)
	availableForReservation = dn.AvailableSpaceForReservation(option)
	if availableForReservation != 5 {
		t.Errorf("Expected 5 available slots after release, got %d", availableForReservation)
	}
}

func TestNodeImpl_ConcurrentReservations(t *testing.T) {
	dn := NewDataNode("test-node")
	diskType := types.HardDriveType

	// Set up capacity
	diskUsage := dn.diskUsages.getOrCreateDisk(diskType)
	diskUsage.maxVolumeCount = 10
	diskUsage.volumeCount = 0 // 10 volumes free initially

	// Test concurrent reservations using goroutines
	var wg sync.WaitGroup
	var reservationIds sync.Map
	concurrentRequests := 10
	wg.Add(concurrentRequests)

	for i := 0; i < concurrentRequests; i++ {
		go func(i int) {
			defer wg.Done()
			if reservationId, success := dn.TryReserveCapacity(diskType, 1); success {
				reservationIds.Store(reservationId, true)
				t.Logf("goroutine %d: Successfully reserved %s", i, reservationId)
			} else {
				t.Errorf("goroutine %d: Expected successful reservation", i)
			}
		}(i)
	}
	wg.Wait()

	// Should have no more capacity
	option := &VolumeGrowOption{DiskType: diskType}
	if available := dn.AvailableSpaceForReservation(option); available != 0 {
		t.Errorf("Expected 0 available slots after all reservations, got %d", available)
		// Debug: check total reserved
		reservedCount := dn.capacityReservations.getReservedCount(diskType)
		t.Logf("Debug: Total reserved count: %d", reservedCount)
	}

	// Next reservation should fail
	_, success := dn.TryReserveCapacity(diskType, 1)
	if success {
		t.Error("Expected reservation failure when at capacity")
	}

	// Release all reservations
	reservationIds.Range(func(key, value interface{}) bool {
		dn.ReleaseReservedCapacity(key.(string))
		return true
	})

	// Should have full capacity back
	if available := dn.AvailableSpaceForReservation(option); available != 10 {
		t.Errorf("Expected 10 available slots after releasing all, got %d", available)
	}
}


@@ -118,16 +118,6 @@ func (a *DiskUsageCounts) FreeSpace() int64 {
	return freeVolumeSlotCount
}

func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts {
	return &DiskUsageCounts{
		volumeCount:       a.volumeCount - b.volumeCount,
		remoteVolumeCount: a.remoteVolumeCount - b.remoteVolumeCount,
		activeVolumeCount: a.activeVolumeCount - b.activeVolumeCount,
		ecShardCount:      a.ecShardCount - b.ecShardCount,
		maxVolumeCount:    a.maxVolumeCount - b.maxVolumeCount,
	}
}

func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
	du.Lock()
	defer du.Unlock()


@@ -40,13 +40,6 @@ func newCapacityReservations() *CapacityReservations {
	}
}

func (cr *CapacityReservations) addReservation(diskType types.DiskType, count int64) string {
	cr.Lock()
	defer cr.Unlock()
	return cr.doAddReservation(diskType, count)
}

func (cr *CapacityReservations) removeReservation(reservationId string) bool {
	cr.Lock()
	defer cr.Unlock()


@@ -40,10 +40,6 @@ func ExistCopies() stateIndicator {
	return func(state copyState) bool { return state != noCopies }
}

func NoCopies() stateIndicator {
	return func(state copyState) bool { return state == noCopies }
}

type volumesBinaryState struct {
	rp   *super_block.ReplicaPlacement
	name volumeState // the name for volume state (eg. "Readonly", "Oversized")
@@ -264,12 +260,6 @@ func (vl *VolumeLayout) isCrowdedVolume(v *storage.VolumeInfo) bool {
	return float64(v.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold
}

func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
	return !vl.isOversized(v) &&
		v.Version == needle.GetCurrentVersion() &&
		!v.ReadOnly
}

func (vl *VolumeLayout) isEmpty() bool {
	vl.accessLock.RLock()
	defer vl.accessLock.RUnlock()


@@ -1,190 +0,0 @@
package topology

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/storage"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func TestVolumesBinaryState(t *testing.T) {
	vids := []needle.VolumeId{
		needle.VolumeId(1),
		needle.VolumeId(2),
		needle.VolumeId(3),
		needle.VolumeId(4),
		needle.VolumeId(5),
	}
	dns := []*DataNode{
		&DataNode{
			Ip:   "127.0.0.1",
			Port: 8081,
		},
		&DataNode{
			Ip:   "127.0.0.1",
			Port: 8082,
		},
		&DataNode{
			Ip:   "127.0.0.1",
			Port: 8083,
		},
	}
	rp, _ := super_block.NewReplicaPlacementFromString("002")

	state_exist := NewVolumesBinaryState(readOnlyState, rp, ExistCopies())
	state_exist.Add(vids[0], dns[0])
	state_exist.Add(vids[0], dns[1])
	state_exist.Add(vids[1], dns[2])
	state_exist.Add(vids[2], dns[1])
	state_exist.Add(vids[4], dns[1])
	state_exist.Add(vids[4], dns[2])

	state_no := NewVolumesBinaryState(readOnlyState, rp, NoCopies())
	state_no.Add(vids[0], dns[0])
	state_no.Add(vids[0], dns[1])
	state_no.Add(vids[3], dns[1])

	tests := []struct {
		name                    string
		state                   *volumesBinaryState
		expectResult            []bool
		update                  func()
		expectResultAfterUpdate []bool
	}{
		{
			name:         "mark true when copies exist",
			state:        state_exist,
			expectResult: []bool{true, true, true, false, true},
			update: func() {
				state_exist.Remove(vids[0], dns[2])
				state_exist.Remove(vids[1], dns[2])
				state_exist.Remove(vids[3], dns[2])
				state_exist.Remove(vids[4], dns[1])
				state_exist.Remove(vids[4], dns[2])
			},
			expectResultAfterUpdate: []bool{true, false, true, false, false},
		},
		{
			name:         "mark true when no copies exist",
			state:        state_no,
			expectResult: []bool{false, true, true, false, true},
			update: func() {
				state_no.Remove(vids[0], dns[2])
				state_no.Remove(vids[1], dns[2])
				state_no.Add(vids[2], dns[1])
				state_no.Remove(vids[3], dns[1])
				state_no.Remove(vids[4], dns[2])
			},
			expectResultAfterUpdate: []bool{false, true, false, true, true},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var result []bool
			for index, _ := range vids {
				result = append(result, test.state.IsTrue(vids[index]))
			}
			if len(result) != len(test.expectResult) {
				t.Fatalf("len(result) != len(expectResult), got %d, expected %d\n",
					len(result), len(test.expectResult))
			}
			for index, val := range result {
				if val != test.expectResult[index] {
					t.Fatalf("result not matched, index %d, got %v, expected %v\n",
						index, val, test.expectResult[index])
				}
			}
			test.update()
			var updateResult []bool
			for index, _ := range vids {
				updateResult = append(updateResult, test.state.IsTrue(vids[index]))
			}
			if len(updateResult) != len(test.expectResultAfterUpdate) {
				t.Fatalf("len(updateResult) != len(expectResultAfterUpdate), got %d, expected %d\n",
					len(updateResult), len(test.expectResultAfterUpdate))
			}
			for index, val := range updateResult {
				if val != test.expectResultAfterUpdate[index] {
					t.Fatalf("update result not matched, index %d, got %v, expected %v\n",
						index, val, test.expectResultAfterUpdate[index])
				}
			}
		})
	}
}

func TestVolumeLayoutCrowdedState(t *testing.T) {
	rp, _ := super_block.NewReplicaPlacementFromString("000")
	ttl, _ := needle.ReadTTL("")
	diskType := types.HardDriveType
	vl := NewVolumeLayout(rp, ttl, diskType, 1024*1024*1024, false)
	vid := needle.VolumeId(1)
	dn := &DataNode{
		NodeImpl: NodeImpl{
			id: "test-node",
		},
		Ip:   "127.0.0.1",
		Port: 8080,
	}

	// Create a volume info
	volumeInfo := &storage.VolumeInfo{
		Id:               vid,
		ReplicaPlacement: rp,
		Ttl:              ttl,
		DiskType:         string(diskType),
	}

	// Register the volume
	vl.RegisterVolume(volumeInfo, dn)

	// Add the volume to writables
	vl.accessLock.Lock()
	vl.setVolumeWritable(vid)
	vl.accessLock.Unlock()

	// Mark the volume as crowded
	vl.SetVolumeCrowded(vid)

	t.Run("should be crowded after being marked", func(t *testing.T) {
		vl.accessLock.RLock()
		_, isCrowded := vl.crowded[vid]
		vl.accessLock.RUnlock()
		if !isCrowded {
			t.Fatal("Volume should be marked as crowded after SetVolumeCrowded")
		}
	})

	// Remove from writable (simulating temporary unwritable state)
	vl.accessLock.Lock()
	vl.removeFromWritable(vid)
	vl.accessLock.Unlock()

	t.Run("should remain crowded after becoming unwritable", func(t *testing.T) {
		// This is the fix for issue #6712 - crowded state should persist
		vl.accessLock.RLock()
		_, stillCrowded := vl.crowded[vid]
		vl.accessLock.RUnlock()
		if !stillCrowded {
			t.Fatal("Volume should remain crowded after becoming unwritable (fix for issue #6712)")
		}
	})

	// Now unregister the volume completely
	vl.UnRegisterVolume(volumeInfo, dn)

	t.Run("should not be crowded after unregistering", func(t *testing.T) {
		vl.accessLock.RLock()
		_, stillCrowdedAfterUnregister := vl.crowded[vid]
		vl.accessLock.RUnlock()
		if stillCrowdedAfterUnregister {
			t.Fatal("Volume should be removed from crowded map after full unregistration")
		}
	})
}
}