chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
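
The shape of the failure and the fix, as a self-contained sketch. The store
below is illustrative; only the details that MemoryStore is a process-wide
singleton registered in init(), that LookupAnonymous() reports a leaked
anonymous identity, and that Reset() clears the store come from this commit:

package main

import (
	"fmt"
	"sync"
)

// memoryStore stands in for the MemoryStore credential backend: registered
// once via init(), it is shared by every test in the process.
type memoryStore struct {
	mu         sync.Mutex
	identities map[string]struct{}
}

var shared = &memoryStore{identities: map[string]struct{}{}}

func (s *memoryStore) Add(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.identities[name] = struct{}{}
}

// LookupAnonymous reports whether an anonymous identity exists in the store.
func (s *memoryStore) LookupAnonymous() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.identities["anonymous"]
	return ok
}

// Reset clears the shared state so a test starts from a clean slate.
func (s *memoryStore) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.identities = map[string]struct{}{}
}

func main() {
	shared.Add("anonymous")               // an earlier test pollutes the singleton
	fmt.Println(shared.LookupAnonymous()) // true: the surprise behind the flake
	shared.Reset()                        // the fix: reset before the test body
	fmt.Println(shared.LookupAnonymous()) // false, as the test expects
}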

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
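
A self-contained sketch of the done-channel pattern described above. All names
besides streamSession and sendToWorker are illustrative, and the sketch takes a
simpler route than the actual fix: it never closes the outgoing channel at all,
since a send inside a select can still panic if the channel is closed at the
wrong moment, and an unreferenced open channel is simply garbage-collected.
The done channel alone lets both sides observe shutdown:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errSessionClosed = errors.New("worker session closed")

// streamSession is illustrative, not the actual plugin code.
type streamSession struct {
	outgoing  chan string   // messages queued for the worker
	done      chan struct{} // closed to signal the session is gone
	closeOnce sync.Once
}

func (s *streamSession) close() {
	// Close done so senders and the receiving goroutine stop. The outgoing
	// channel is deliberately left open: closing it while a sender may be
	// selecting on it is exactly the panic this fix removes.
	s.closeOnce.Do(func() { close(s.done) })
}

func (s *streamSession) sendToWorker(msg string) error {
	select {
	case <-s.done:
		return errSessionClosed
	case s.outgoing <- msg:
		return nil
	}
}

func main() {
	s := &streamSession{outgoing: make(chan string), done: make(chan struct{})}
	go func() { fmt.Println("worker got:", <-s.outgoing) }()
	fmt.Println(s.sendToWorker("hello")) // <nil>
	s.close()
	fmt.Println(s.sendToWorker("late")) // worker session closed
}
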
Chris Lu, 2026-04-03 16:04:27 -07:00 (committed by GitHub)
parent 8fad85aed7, commit 995dfc4d5d
264 changed files with 62 additions and 46027 deletions


@@ -28,28 +28,6 @@ func (imt *InflightMessageTracker) EnflightMessage(key []byte, tsNs int64) {
	imt.timestamps.EnflightTimestamp(tsNs)
}

// IsMessageAcknowledged returns true if the message has been acknowledged.
// Messages at or before the oldest acknowledged timestamp are treated as
// already acknowledged; messages newer than the latest tracked timestamp,
// or still inflight, are not.
func (imt *InflightMessageTracker) IsMessageAcknowledged(key []byte, tsNs int64) bool {
	imt.mu.Lock()
	defer imt.mu.Unlock()
	if tsNs <= imt.timestamps.OldestAckedTimestamp() {
		return true
	}
	if tsNs > imt.timestamps.Latest() {
		return false
	}
	if _, found := imt.messages[string(key)]; found {
		return false
	}
	return true
}

// AcknowledgeMessage acknowledges the message with the key and timestamp.
func (imt *InflightMessageTracker) AcknowledgeMessage(key []byte, tsNs int64) bool {
	// fmt.Printf("AcknowledgeMessage(%s,%d)\n", string(key), tsNs)
@@ -164,8 +142,3 @@ func (rb *RingBuffer) AckTimestamp(timestamp int64) {
func (rb *RingBuffer) OldestAckedTimestamp() int64 {
	return rb.maxAllAckedTs
}

// Latest returns the most recently known timestamp in the ring buffer.
func (rb *RingBuffer) Latest() int64 {
	return rb.maxTimestamp
}


@@ -1,134 +0,0 @@
package sub_coordinator

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestRingBuffer(t *testing.T) {
	// Initialize a RingBuffer with capacity 5
	rb := NewRingBuffer(5)

	// Add timestamps to the buffer
	timestamps := []int64{100, 200, 300, 400, 500}
	for _, ts := range timestamps {
		rb.EnflightTimestamp(ts)
	}

	// Test Add method and buffer size
	expectedSize := 5
	if rb.size != expectedSize {
		t.Errorf("Expected buffer size %d, got %d", expectedSize, rb.size)
	}

	assert.Equal(t, int64(0), rb.OldestAckedTimestamp())
	assert.Equal(t, int64(500), rb.Latest())
	rb.AckTimestamp(200)
	assert.Equal(t, int64(0), rb.OldestAckedTimestamp())
	rb.AckTimestamp(100)
	assert.Equal(t, int64(200), rb.OldestAckedTimestamp())

	rb.EnflightTimestamp(int64(600))
	rb.EnflightTimestamp(int64(700))
	rb.AckTimestamp(500)
	assert.Equal(t, int64(200), rb.OldestAckedTimestamp())
	rb.AckTimestamp(400)
	assert.Equal(t, int64(200), rb.OldestAckedTimestamp())
	rb.AckTimestamp(300)
	assert.Equal(t, int64(500), rb.OldestAckedTimestamp())
	assert.Equal(t, int64(700), rb.Latest())
}

func TestInflightMessageTracker(t *testing.T) {
	// Initialize an InflightMessageTracker with capacity 5
	tracker := NewInflightMessageTracker(5)

	// Add inflight messages
	key := []byte("1")
	timestamp := int64(1)
	tracker.EnflightMessage(key, timestamp)

	// Test IsMessageAcknowledged method
	isOld := tracker.IsMessageAcknowledged(key, timestamp-10)
	if !isOld {
		t.Error("Expected message to be old")
	}

	// Test AcknowledgeMessage method
	acked := tracker.AcknowledgeMessage(key, timestamp)
	if !acked {
		t.Error("Expected message to be acked")
	}
	if _, exists := tracker.messages[string(key)]; exists {
		t.Error("Expected message to be deleted after ack")
	}
	if tracker.timestamps.size != 0 {
		t.Error("Expected buffer size to be 0 after ack")
	}
	assert.Equal(t, timestamp, tracker.GetOldestAckedTimestamp())
}

func TestInflightMessageTracker2(t *testing.T) {
	// Initialize an InflightMessageTracker with initial capacity 1
	tracker := NewInflightMessageTracker(1)
	tracker.EnflightMessage([]byte("1"), int64(1))
	tracker.EnflightMessage([]byte("2"), int64(2))
	tracker.EnflightMessage([]byte("3"), int64(3))
	tracker.EnflightMessage([]byte("4"), int64(4))
	tracker.EnflightMessage([]byte("5"), int64(5))
	assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1)))
	assert.Equal(t, int64(1), tracker.GetOldestAckedTimestamp())

	// Test IsMessageAcknowledged method
	isAcked := tracker.IsMessageAcknowledged([]byte("2"), int64(2))
	if isAcked {
		t.Error("Expected message to be not acked")
	}

	// Test AcknowledgeMessage method
	assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2)))
	assert.Equal(t, int64(2), tracker.GetOldestAckedTimestamp())
}

func TestInflightMessageTracker3(t *testing.T) {
	// Initialize an InflightMessageTracker with initial capacity 1
	tracker := NewInflightMessageTracker(1)
	tracker.EnflightMessage([]byte("1"), int64(1))
	tracker.EnflightMessage([]byte("2"), int64(2))
	tracker.EnflightMessage([]byte("3"), int64(3))
	assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1)))
	tracker.EnflightMessage([]byte("4"), int64(4))
	tracker.EnflightMessage([]byte("5"), int64(5))
	assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2)))
	assert.True(t, tracker.AcknowledgeMessage([]byte("3"), int64(3)))
	tracker.EnflightMessage([]byte("6"), int64(6))
	tracker.EnflightMessage([]byte("7"), int64(7))
	assert.True(t, tracker.AcknowledgeMessage([]byte("4"), int64(4)))
	assert.True(t, tracker.AcknowledgeMessage([]byte("5"), int64(5)))
	assert.True(t, tracker.AcknowledgeMessage([]byte("6"), int64(6)))
	assert.Equal(t, int64(6), tracker.GetOldestAckedTimestamp())
	assert.True(t, tracker.AcknowledgeMessage([]byte("7"), int64(7)))
	assert.Equal(t, int64(7), tracker.GetOldestAckedTimestamp())
}

func TestInflightMessageTracker4(t *testing.T) {
	// Initialize an InflightMessageTracker with initial capacity 1
	tracker := NewInflightMessageTracker(1)
	tracker.EnflightMessage([]byte("1"), int64(1))
	tracker.EnflightMessage([]byte("2"), int64(2))
	assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1)))
	assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2)))
	tracker.EnflightMessage([]byte("3"), int64(3))
	assert.True(t, tracker.AcknowledgeMessage([]byte("3"), int64(3)))
	assert.Equal(t, int64(3), tracker.GetOldestAckedTimestamp())
}


@@ -1,130 +1,6 @@
package sub_coordinator

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
)

type PartitionConsumerMapping struct {
	currentMapping *PartitionSlotToConsumerInstanceList
	prevMappings   []*PartitionSlotToConsumerInstanceList
}

// Balance goal:
// 1. max processing power utilization
// 2. allow one consumer instance to be down unexpectedly
//    without affecting the processing power utilization
func (pcm *PartitionConsumerMapping) BalanceToConsumerInstances(partitionSlotToBrokerList *pub_balancer.PartitionSlotToBrokerList, consumerInstances []*ConsumerGroupInstance) {
	if len(partitionSlotToBrokerList.PartitionSlots) == 0 || len(consumerInstances) == 0 {
		return
	}
	newMapping := NewPartitionSlotToConsumerInstanceList(partitionSlotToBrokerList.RingSize, time.Now())
	var prevMapping *PartitionSlotToConsumerInstanceList
	if len(pcm.prevMappings) > 0 {
		prevMapping = pcm.prevMappings[len(pcm.prevMappings)-1]
	} else {
		prevMapping = nil
	}
	newMapping.PartitionSlots = doBalanceSticky(partitionSlotToBrokerList.PartitionSlots, consumerInstances, prevMapping)
	if pcm.currentMapping != nil {
		pcm.prevMappings = append(pcm.prevMappings, pcm.currentMapping)
		if len(pcm.prevMappings) > 10 {
			pcm.prevMappings = pcm.prevMappings[1:]
		}
	}
	pcm.currentMapping = newMapping
}

func doBalanceSticky(partitions []*pub_balancer.PartitionSlotToBroker, consumerInstances []*ConsumerGroupInstance, prevMapping *PartitionSlotToConsumerInstanceList) (partitionSlots []*PartitionSlotToConsumerInstance) {
	// collect previous consumer instance ids
	prevConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{})
	if prevMapping != nil {
		for _, prevPartitionSlot := range prevMapping.PartitionSlots {
			if prevPartitionSlot.AssignedInstanceId != "" {
				prevConsumerInstanceIds[prevPartitionSlot.AssignedInstanceId] = struct{}{}
			}
		}
	}
	// collect current consumer instance ids
	currConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{})
	for _, consumerInstance := range consumerInstances {
		currConsumerInstanceIds[consumerInstance.InstanceId] = struct{}{}
	}
	// check deleted consumer instances
	deletedConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{})
	for consumerInstanceId := range prevConsumerInstanceIds {
		if _, ok := currConsumerInstanceIds[consumerInstanceId]; !ok {
			deletedConsumerInstanceIds[consumerInstanceId] = struct{}{}
		}
	}
	// convert partition slots from list to a map
	prevPartitionSlotMap := make(map[string]*PartitionSlotToConsumerInstance)
	if prevMapping != nil {
		for _, partitionSlot := range prevMapping.PartitionSlots {
			key := fmt.Sprintf("%d-%d", partitionSlot.RangeStart, partitionSlot.RangeStop)
			prevPartitionSlotMap[key] = partitionSlot
		}
	}
	// make a copy of the old mapping, skipping the deleted consumer instances
	newPartitionSlots := make([]*PartitionSlotToConsumerInstance, 0, len(partitions))
	for _, partition := range partitions {
		newPartitionSlots = append(newPartitionSlots, &PartitionSlotToConsumerInstance{
			RangeStart:     partition.RangeStart,
			RangeStop:      partition.RangeStop,
			UnixTimeNs:     partition.UnixTimeNs,
			Broker:         partition.AssignedBroker,
			FollowerBroker: partition.FollowerBroker,
		})
	}
	for _, newPartitionSlot := range newPartitionSlots {
		key := fmt.Sprintf("%d-%d", newPartitionSlot.RangeStart, newPartitionSlot.RangeStop)
		if prevPartitionSlot, ok := prevPartitionSlotMap[key]; ok {
			if _, ok := deletedConsumerInstanceIds[prevPartitionSlot.AssignedInstanceId]; !ok {
				newPartitionSlot.AssignedInstanceId = prevPartitionSlot.AssignedInstanceId
			}
		}
	}
	// count the number of partitions currently assigned to each consumer instance
	consumerInstancePartitionCount := make(map[ConsumerGroupInstanceId]int)
	for _, newPartitionSlot := range newPartitionSlots {
		if newPartitionSlot.AssignedInstanceId != "" {
			consumerInstancePartitionCount[newPartitionSlot.AssignedInstanceId]++
		}
	}
	// average number of partitions that are assigned to each consumer instance
	averageConsumerInstanceLoad := float32(len(partitions)) / float32(len(consumerInstances))
	// assign unassigned partition slots to consumer instances that are underloaded
	consumerInstanceIdsIndex := 0
	for _, newPartitionSlot := range newPartitionSlots {
		if newPartitionSlot.AssignedInstanceId == "" {
			for avoidDeadLoop := len(consumerInstances); avoidDeadLoop > 0; avoidDeadLoop-- {
				consumerInstance := consumerInstances[consumerInstanceIdsIndex]
				if float32(consumerInstancePartitionCount[consumerInstance.InstanceId]) < averageConsumerInstanceLoad {
					newPartitionSlot.AssignedInstanceId = consumerInstance.InstanceId
					consumerInstancePartitionCount[consumerInstance.InstanceId]++
					consumerInstanceIdsIndex++
					if consumerInstanceIdsIndex >= len(consumerInstances) {
						consumerInstanceIdsIndex = 0
					}
					break
				} else {
					consumerInstanceIdsIndex++
					if consumerInstanceIdsIndex >= len(consumerInstances) {
						consumerInstanceIdsIndex = 0
					}
				}
			}
		}
	}
	return newPartitionSlots
}


@@ -1,385 +0,0 @@
package sub_coordinator

import (
	"reflect"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
)

func Test_doBalanceSticky(t *testing.T) {
	type args struct {
		partitions          []*pub_balancer.PartitionSlotToBroker
		consumerInstanceIds []*ConsumerGroupInstance
		prevMapping         *PartitionSlotToConsumerInstanceList
	}
	tests := []struct {
		name               string
		args               args
		wantPartitionSlots []*PartitionSlotToConsumerInstance
	}{
		{
			name: "1 consumer instance, 1 partition",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "2 consumer instances, 1 partition",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "1 consumer instance, 2 partitions",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  50,
					},
					{
						RangeStart: 50,
						RangeStop:  100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart:         50,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  50,
					},
					{
						RangeStart: 50,
						RangeStop:  100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart:         50,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-2",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 deleted consumer instance",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  50,
					},
					{
						RangeStart: 50,
						RangeStop:  100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart:         0,
							RangeStop:          50,
							AssignedInstanceId: "consumer-instance-3",
						},
						{
							RangeStart:         50,
							RangeStop:          100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart:         50,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-2",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 new consumer instance",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  50,
					},
					{
						RangeStart: 50,
						RangeStop:  100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-2",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-3",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart:         0,
							RangeStop:          50,
							AssignedInstanceId: "consumer-instance-3",
						},
						{
							RangeStart:         50,
							RangeStop:          100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          50,
					AssignedInstanceId: "consumer-instance-3",
				},
				{
					RangeStart:         50,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-2",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 new partition",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  50,
					},
					{
						RangeStart: 50,
						RangeStop:  100,
					},
					{
						RangeStart: 100,
						RangeStop:  150,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart:         0,
							RangeStop:          50,
							AssignedInstanceId: "consumer-instance-1",
						},
						{
							RangeStart:         50,
							RangeStop:          100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart:         50,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-2",
				},
				{
					RangeStart:         100,
					RangeStop:          150,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 new partition, 1 new consumer instance",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop:  50,
					},
					{
						RangeStart: 50,
						RangeStop:  100,
					},
					{
						RangeStart: 100,
						RangeStop:  150,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId:        "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-2",
						MaxPartitionCount: 1,
					},
					{
						InstanceId:        "consumer-instance-3",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart:         0,
							RangeStop:          50,
							AssignedInstanceId: "consumer-instance-1",
						},
						{
							RangeStart:         50,
							RangeStop:          100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart:         0,
					RangeStop:          50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart:         50,
					RangeStop:          100,
					AssignedInstanceId: "consumer-instance-2",
				},
				{
					RangeStart:         100,
					RangeStop:          150,
					AssignedInstanceId: "consumer-instance-3",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if gotPartitionSlots := doBalanceSticky(tt.args.partitions, tt.args.consumerInstanceIds, tt.args.prevMapping); !reflect.DeepEqual(gotPartitionSlots, tt.wantPartitionSlots) {
				t.Errorf("doBalanceSticky() = %v, want %v", gotPartitionSlots, tt.wantPartitionSlots)
			}
		})
	}
}


@@ -1,7 +1,5 @@
package sub_coordinator

import "time"

type PartitionSlotToConsumerInstance struct {
	RangeStart int32
	RangeStop  int32
@@ -16,10 +14,3 @@ type PartitionSlotToConsumerInstanceList struct {
	RingSize int32
	Version  int64
}

func NewPartitionSlotToConsumerInstanceList(ringSize int32, version time.Time) *PartitionSlotToConsumerInstanceList {
	return &PartitionSlotToConsumerInstanceList{
		RingSize: ringSize,
		Version:  version.UnixNano(),
	}
}