chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
This commit is contained in:
Chris Lu
2026-04-03 16:04:27 -07:00
committed by GitHub
parent 8fad85aed7
commit 995dfc4d5d
264 changed files with 62 additions and 46027 deletions

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/filer_client"
@@ -192,10 +191,6 @@ func (f *FilerStorage) getOffsetPath(group, topic string, partition int32) strin
return fmt.Sprintf("%s/offset", f.getPartitionPath(group, topic, partition))
}
// getMetadataPath returns the filer path that stores committed metadata for
// the given consumer group / topic / partition triple, i.e. the partition
// directory with a "/metadata" suffix.
func (f *FilerStorage) getMetadataPath(group, topic string, partition int32) string {
	return f.getPartitionPath(group, topic, partition) + "/metadata"
}
func (f *FilerStorage) writeFile(path string, data []byte) error {
fullPath := util.FullPath(path)
dir, name := fullPath.DirAndName()
@@ -311,16 +306,3 @@ func (f *FilerStorage) deleteDirectory(path string) error {
return err
})
}
// normalizePath canonicalizes a filer path: runs of '/' are collapsed, any
// trailing slash is dropped, and the result always carries exactly one leading
// slash. An empty or all-slash input normalizes to "/".
func normalizePath(path string) string {
	// FieldsFunc splits on '/' and discards empty segments, which collapses
	// repeated slashes and strips leading/trailing ones in a single pass.
	segments := strings.FieldsFunc(path, func(r rune) bool { return r == '/' })
	return "/" + strings.Join(segments, "/")
}

View File

@@ -1,65 +0,0 @@
package consumer_offset
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Note: These tests require a running filer instance
// They are marked as integration tests and should be run with:
// go test -tags=integration
// TestFilerStorageCommitAndFetch is a placeholder for the commit/fetch
// round-trip integration test; it is skipped until a live filer is available.
func TestFilerStorageCommitAndFetch(t *testing.T) {
	t.Skip("Requires running filer - integration test")
	// This will be implemented once we have test infrastructure
	// Test will:
	// 1. Create filer storage
	// 2. Commit offset
	// 3. Fetch offset
	// 4. Verify values match
}
// TestFilerStoragePersistence is a placeholder verifying that offsets survive
// a storage-instance restart; it is skipped until a live filer is available.
func TestFilerStoragePersistence(t *testing.T) {
	t.Skip("Requires running filer - integration test")
	// Test will:
	// 1. Commit offset with first storage instance
	// 2. Close first instance
	// 3. Create new storage instance
	// 4. Fetch offset and verify it persisted
}
// TestFilerStorageMultipleGroups is a placeholder checking isolation between
// consumer groups; it is skipped until a live filer is available.
func TestFilerStorageMultipleGroups(t *testing.T) {
	t.Skip("Requires running filer - integration test")
	// Test will:
	// 1. Commit offsets for multiple groups
	// 2. Fetch all offsets per group
	// 3. Verify isolation between groups
}
// TestFilerStoragePath exercises the path-generation helpers; it needs no
// running filer, so it is not gated behind the integration tag.
func TestFilerStoragePath(t *testing.T) {
	storage := &FilerStorage{}
	const (
		group     = "test-group"
		topic     = "test-topic"
		partition = int32(5)
	)
	cases := []struct {
		name string
		got  string
		want string
	}{
		{"group", storage.getGroupPath(group), ConsumerOffsetsBasePath + "/test-group"},
		{"topic", storage.getTopicPath(group, topic), ConsumerOffsetsBasePath + "/test-group/test-topic"},
		{"partition", storage.getPartitionPath(group, topic, partition), ConsumerOffsetsBasePath + "/test-group/test-topic/5"},
		{"offset", storage.getOffsetPath(group, topic, partition), ConsumerOffsetsBasePath + "/test-group/test-topic/5/offset"},
		{"metadata", storage.getMetadataPath(group, topic, partition), ConsumerOffsetsBasePath + "/test-group/test-topic/5/metadata"},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, c.got, c.name)
	}
}

View File

@@ -278,38 +278,3 @@ func (h *SeaweedMQHandler) checkTopicInFiler(topicName string) bool {
return exists
}
// listTopicsFromFiler lists all topics from the filer by scanning the
// /topics/kafka directory; each topic is represented as a subdirectory.
// It is deliberately best-effort: on any listing or stream error it returns
// whatever (possibly empty) list has been gathered so far.
func (h *SeaweedMQHandler) listTopicsFromFiler() []string {
	if h.filerClientAccessor == nil {
		return []string{}
	}
	var topics []string
	h.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		request := &filer_pb.ListEntriesRequest{
			Directory: "/topics/kafka",
		}
		stream, err := client.ListEntries(context.Background(), request)
		if err != nil {
			return nil // Don't propagate error, just return empty list
		}
		for {
			resp, err := stream.Recv()
			if err != nil {
				break // End of stream (io.EOF) or transport error — stop scanning
			}
			// Only directory entries denote topics; plain file entries are skipped.
			// (The original code carried an empty `else if` branch here — dead
			// code, removed.)
			if resp.Entry != nil && resp.Entry.IsDirectory {
				topics = append(topics, resp.Entry.Name)
			}
		}
		return nil
	})
	return topics
}

View File

@@ -1,53 +0,0 @@
package kafka
import (
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)
// Convenience functions for partition mapping used by production code
// The full PartitionMapper implementation is in partition_mapping_test.go for testing
// MapKafkaPartitionToSMQRange maps a Kafka partition onto its slice of the
// SeaweedMQ ring. Each Kafka partition owns 35 consecutive ring slots, chosen
// so the slot count divides MaxPartitionCount (2520) evenly, yielding exactly
// 72 Kafka partitions. Both ends of the returned range are inclusive.
func MapKafkaPartitionToSMQRange(kafkaPartition int32) (rangeStart, rangeStop int32) {
	const slotsPerPartition int32 = 35
	rangeStart = kafkaPartition * slotsPerPartition
	rangeStop = rangeStart + slotsPerPartition - 1
	return
}
// CreateSMQPartition builds a schema_pb.Partition descriptor covering the ring
// slice that corresponds to the given Kafka partition. unixTimeNs stamps the
// partition and is stored verbatim in the result.
func CreateSMQPartition(kafkaPartition int32, unixTimeNs int64) *schema_pb.Partition {
	rangeStart, rangeStop := MapKafkaPartitionToSMQRange(kafkaPartition)
	return &schema_pb.Partition{
		RingSize:   pub_balancer.MaxPartitionCount,
		RangeStart: rangeStart,
		RangeStop:  rangeStop,
		UnixTimeNs: unixTimeNs,
	}
}
// ExtractKafkaPartitionFromSMQRange is the inverse of
// MapKafkaPartitionToSMQRange: it recovers the Kafka partition index from the
// starting slot of a SeaweedMQ ring range.
func ExtractKafkaPartitionFromSMQRange(rangeStart int32) int32 {
	const slotsPerPartition int32 = 35
	return rangeStart / slotsPerPartition
}
// ValidateKafkaPartition reports whether kafkaPartition falls inside the
// supported range: [0, MaxPartitionCount/35), i.e. [0, 72) for a 2520-slot ring.
func ValidateKafkaPartition(kafkaPartition int32) bool {
	if kafkaPartition < 0 {
		return false
	}
	return kafkaPartition < int32(pub_balancer.MaxPartitionCount)/35
}
// GetRangeSize returns the number of SeaweedMQ ring slots assigned to each
// Kafka partition.
func GetRangeSize() int32 {
	const slotsPerPartition int32 = 35
	return slotsPerPartition
}
// GetMaxKafkaPartitions returns the maximum number of Kafka partitions that
// fit on the ring: MaxPartitionCount (2520) divided by the range size (35).
func GetMaxKafkaPartitions() int32 {
	return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions
}

View File

@@ -1,294 +0,0 @@
package kafka
import (
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)
// PartitionMapper provides a consistent Kafka-partition-to-SeaweedMQ-ring
// mapping. NOTE: this is test-only code and is not used in the actual Kafka
// Gateway implementation.
type PartitionMapper struct{}

// NewPartitionMapper creates a new, stateless partition mapper.
func NewPartitionMapper() *PartitionMapper {
	return &PartitionMapper{}
}
// GetRangeSize returns the consistent range size (ring slots per Kafka
// partition) so that every component uses the same calculation.
func (pm *PartitionMapper) GetRangeSize() int32 {
	// Use a range size that divides evenly into MaxPartitionCount (2520).
	// Range size 35 gives exactly 72 Kafka partitions: 2520 / 35 = 72 —
	// a good balance between partition granularity and ring utilization.
	return 35
}

// GetMaxKafkaPartitions returns the maximum number of Kafka partitions supported.
func (pm *PartitionMapper) GetMaxKafkaPartitions() int32 {
	// With range size 35 we can support 2520 / 35 = 72 Kafka partitions.
	return int32(pub_balancer.MaxPartitionCount) / pm.GetRangeSize()
}
// MapKafkaPartitionToSMQRange maps a Kafka partition to its SeaweedMQ ring
// range; both rangeStart and rangeStop are inclusive.
func (pm *PartitionMapper) MapKafkaPartitionToSMQRange(kafkaPartition int32) (rangeStart, rangeStop int32) {
	rangeSize := pm.GetRangeSize()
	rangeStart = kafkaPartition * rangeSize
	rangeStop = rangeStart + rangeSize - 1
	return rangeStart, rangeStop
}

// CreateSMQPartition creates a SeaweedMQ partition descriptor covering the
// ring range of the given Kafka partition; unixTimeNs is stored verbatim.
func (pm *PartitionMapper) CreateSMQPartition(kafkaPartition int32, unixTimeNs int64) *schema_pb.Partition {
	rangeStart, rangeStop := pm.MapKafkaPartitionToSMQRange(kafkaPartition)
	return &schema_pb.Partition{
		RingSize:   pub_balancer.MaxPartitionCount,
		RangeStart: rangeStart,
		RangeStop:  rangeStop,
		UnixTimeNs: unixTimeNs,
	}
}
// ExtractKafkaPartitionFromSMQRange extracts the Kafka partition from a
// SeaweedMQ range's starting slot (the inverse of MapKafkaPartitionToSMQRange).
func (pm *PartitionMapper) ExtractKafkaPartitionFromSMQRange(rangeStart int32) int32 {
	rangeSize := pm.GetRangeSize()
	return rangeStart / rangeSize
}

// ValidateKafkaPartition validates that a Kafka partition is within the
// supported range [0, GetMaxKafkaPartitions()).
func (pm *PartitionMapper) ValidateKafkaPartition(kafkaPartition int32) bool {
	return kafkaPartition >= 0 && kafkaPartition < pm.GetMaxKafkaPartitions()
}

// GetPartitionMappingInfo returns debug information about the partition
// mapping. NOTE(review): range_size and max_kafka_partitions are boxed as
// int32; ring_size takes pub_balancer.MaxPartitionCount's own type
// (presumably int — confirm against its declaration). Type assertions on the
// returned map must match.
func (pm *PartitionMapper) GetPartitionMappingInfo() map[string]interface{} {
	return map[string]interface{}{
		"ring_size":            pub_balancer.MaxPartitionCount,
		"range_size":           pm.GetRangeSize(),
		"max_kafka_partitions": pm.GetMaxKafkaPartitions(),
		"ring_utilization":     float64(pm.GetMaxKafkaPartitions()*pm.GetRangeSize()) / float64(pub_balancer.MaxPartitionCount),
	}
}

// DefaultPartitionMapper is a global instance for consistent usage across the
// test codebase.
var DefaultPartitionMapper = NewPartitionMapper()
// TestPartitionMapper_GetRangeSize pins the range size at 35 and checks that
// the total mapped slots never exceed the ring capacity.
func TestPartitionMapper_GetRangeSize(t *testing.T) {
	mapper := NewPartitionMapper()
	rangeSize := mapper.GetRangeSize()
	if rangeSize != 35 {
		t.Errorf("Expected range size 35, got %d", rangeSize)
	}
	// Verify that the range size divides evenly into available partitions
	maxPartitions := mapper.GetMaxKafkaPartitions()
	totalUsed := maxPartitions * rangeSize
	if totalUsed > int32(pub_balancer.MaxPartitionCount) {
		t.Errorf("Total used slots (%d) exceeds MaxPartitionCount (%d)", totalUsed, pub_balancer.MaxPartitionCount)
	}
	t.Logf("Range size: %d, Max Kafka partitions: %d, Ring utilization: %.2f%%",
		rangeSize, maxPartitions, float64(totalUsed)/float64(pub_balancer.MaxPartitionCount)*100)
}

// TestPartitionMapper_MapKafkaPartitionToSMQRange checks representative
// partition-to-range mappings and that every returned range spans exactly
// GetRangeSize() slots.
func TestPartitionMapper_MapKafkaPartitionToSMQRange(t *testing.T) {
	mapper := NewPartitionMapper()
	tests := []struct {
		kafkaPartition int32
		expectedStart  int32
		expectedStop   int32
	}{
		{0, 0, 34},
		{1, 35, 69},
		{2, 70, 104},
		{10, 350, 384},
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			start, stop := mapper.MapKafkaPartitionToSMQRange(tt.kafkaPartition)
			if start != tt.expectedStart {
				t.Errorf("Kafka partition %d: expected start %d, got %d", tt.kafkaPartition, tt.expectedStart, start)
			}
			if stop != tt.expectedStop {
				t.Errorf("Kafka partition %d: expected stop %d, got %d", tt.kafkaPartition, tt.expectedStop, stop)
			}
			// Verify range size is consistent (ranges are inclusive on both ends)
			rangeSize := stop - start + 1
			if rangeSize != mapper.GetRangeSize() {
				t.Errorf("Inconsistent range size: expected %d, got %d", mapper.GetRangeSize(), rangeSize)
			}
		})
	}
}
// TestPartitionMapper_ExtractKafkaPartitionFromSMQRange checks that known
// range starts map back to the expected Kafka partition numbers.
func TestPartitionMapper_ExtractKafkaPartitionFromSMQRange(t *testing.T) {
	mapper := NewPartitionMapper()
	tests := []struct {
		rangeStart    int32
		expectedKafka int32
	}{
		{0, 0},
		{35, 1},
		{70, 2},
		{350, 10},
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			kafkaPartition := mapper.ExtractKafkaPartitionFromSMQRange(tt.rangeStart)
			if kafkaPartition != tt.expectedKafka {
				t.Errorf("Range start %d: expected Kafka partition %d, got %d",
					tt.rangeStart, tt.expectedKafka, kafkaPartition)
			}
		})
	}
}

// TestPartitionMapper_RoundTrip verifies Kafka -> SMQ -> Kafka mapping is
// lossless for every valid partition, and that adjacent ranges never overlap.
func TestPartitionMapper_RoundTrip(t *testing.T) {
	mapper := NewPartitionMapper()
	// Test round-trip conversion for all valid Kafka partitions
	maxPartitions := mapper.GetMaxKafkaPartitions()
	for kafkaPartition := int32(0); kafkaPartition < maxPartitions; kafkaPartition++ {
		// Kafka -> SMQ -> Kafka
		rangeStart, rangeStop := mapper.MapKafkaPartitionToSMQRange(kafkaPartition)
		extractedKafka := mapper.ExtractKafkaPartitionFromSMQRange(rangeStart)
		if extractedKafka != kafkaPartition {
			t.Errorf("Round-trip failed for partition %d: got %d", kafkaPartition, extractedKafka)
		}
		// Verify no overlap with next partition
		if kafkaPartition < maxPartitions-1 {
			nextStart, _ := mapper.MapKafkaPartitionToSMQRange(kafkaPartition + 1)
			if rangeStop >= nextStart {
				t.Errorf("Partition %d range [%d,%d] overlaps with partition %d start %d",
					kafkaPartition, rangeStart, rangeStop, kafkaPartition+1, nextStart)
			}
		}
	}
}
// TestPartitionMapper_CreateSMQPartition checks that the constructed
// schema_pb.Partition carries the expected ring size, range, and timestamp.
func TestPartitionMapper_CreateSMQPartition(t *testing.T) {
	mapper := NewPartitionMapper()
	kafkaPartition := int32(5)
	unixTimeNs := time.Now().UnixNano()
	partition := mapper.CreateSMQPartition(kafkaPartition, unixTimeNs)
	if partition.RingSize != pub_balancer.MaxPartitionCount {
		t.Errorf("Expected ring size %d, got %d", pub_balancer.MaxPartitionCount, partition.RingSize)
	}
	expectedStart, expectedStop := mapper.MapKafkaPartitionToSMQRange(kafkaPartition)
	if partition.RangeStart != expectedStart {
		t.Errorf("Expected range start %d, got %d", expectedStart, partition.RangeStart)
	}
	if partition.RangeStop != expectedStop {
		t.Errorf("Expected range stop %d, got %d", expectedStop, partition.RangeStop)
	}
	if partition.UnixTimeNs != unixTimeNs {
		t.Errorf("Expected timestamp %d, got %d", unixTimeNs, partition.UnixTimeNs)
	}
}

// TestPartitionMapper_ValidateKafkaPartition probes the validity boundary:
// negatives are rejected, [0, max) accepted, and max and beyond rejected.
func TestPartitionMapper_ValidateKafkaPartition(t *testing.T) {
	mapper := NewPartitionMapper()
	tests := []struct {
		partition int32
		valid     bool
	}{
		{-1, false},
		{0, true},
		{1, true},
		{mapper.GetMaxKafkaPartitions() - 1, true},
		{mapper.GetMaxKafkaPartitions(), false},
		{1000, false},
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			valid := mapper.ValidateKafkaPartition(tt.partition)
			if valid != tt.valid {
				t.Errorf("Partition %d: expected valid=%v, got %v", tt.partition, tt.valid, valid)
			}
		})
	}
}
// TestPartitionMapper_ConsistencyWithGlobalFunctions verifies the mapper
// methods and the package-level functions produce identical mappings, so the
// two APIs can be used interchangeably.
func TestPartitionMapper_ConsistencyWithGlobalFunctions(t *testing.T) {
	mapper := NewPartitionMapper()
	kafkaPartition := int32(7)
	unixTimeNs := time.Now().UnixNano()
	// Test that global functions produce same results as mapper methods
	start1, stop1 := mapper.MapKafkaPartitionToSMQRange(kafkaPartition)
	start2, stop2 := MapKafkaPartitionToSMQRange(kafkaPartition)
	if start1 != start2 || stop1 != stop2 {
		t.Errorf("Global function inconsistent: mapper=(%d,%d), global=(%d,%d)",
			start1, stop1, start2, stop2)
	}
	partition1 := mapper.CreateSMQPartition(kafkaPartition, unixTimeNs)
	partition2 := CreateSMQPartition(kafkaPartition, unixTimeNs)
	if partition1.RangeStart != partition2.RangeStart || partition1.RangeStop != partition2.RangeStop {
		t.Errorf("Global CreateSMQPartition inconsistent")
	}
	extracted1 := mapper.ExtractKafkaPartitionFromSMQRange(start1)
	extracted2 := ExtractKafkaPartitionFromSMQRange(start1)
	if extracted1 != extracted2 {
		t.Errorf("Global ExtractKafkaPartitionFromSMQRange inconsistent: %d vs %d", extracted1, extracted2)
	}
}

// TestPartitionMapper_GetPartitionMappingInfo checks the debug-info map has
// all expected keys and plausible values.
func TestPartitionMapper_GetPartitionMappingInfo(t *testing.T) {
	mapper := NewPartitionMapper()
	info := mapper.GetPartitionMappingInfo()
	// Verify all expected keys are present
	expectedKeys := []string{"ring_size", "range_size", "max_kafka_partitions", "ring_utilization"}
	for _, key := range expectedKeys {
		if _, exists := info[key]; !exists {
			t.Errorf("Missing key in mapping info: %s", key)
		}
	}
	// Verify values are reasonable
	// NOTE(review): the .(int) assertion assumes pub_balancer.MaxPartitionCount
	// boxes as int when stored in the map — confirm against its declaration.
	if info["ring_size"].(int) != pub_balancer.MaxPartitionCount {
		t.Errorf("Incorrect ring_size in info")
	}
	if info["range_size"].(int32) != mapper.GetRangeSize() {
		t.Errorf("Incorrect range_size in info")
	}
	utilization := info["ring_utilization"].(float64)
	if utilization <= 0 || utilization > 1 {
		t.Errorf("Invalid ring utilization: %f", utilization)
	}
	t.Logf("Partition mapping info: %+v", info)
}