chore: remove ~50k lines of unreachable dead code (#8913)

* chore: remove unreachable dead code across the codebase

Remove ~50,000 lines of unreachable code identified by static analysis.

Major removals:
- weed/filer/redis_lua: entire unused Redis Lua filer store implementation
- weed/wdclient/net2, resource_pool: unused connection/resource pool packages
- weed/plugin/worker/lifecycle: unused lifecycle plugin worker
- weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy,
  multipart IAM, key rotation, and various SSE helper functions
- weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
- weed/mq/offset: unused SQL storage and migration code
- weed/worker: unused registry, task, and monitoring functions
- weed/query: unused SQL engine, parquet scanner, and type functions
- weed/shell: unused EC proportional rebalance functions
- weed/storage/erasure_coding/distribution: unused distribution analysis functions
- Individual unreachable functions removed from 150+ files across admin,
  credential, filer, iam, kms, mount, mq, operation, pb, s3api, server,
  shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because
the MemoryStore credential backend is a singleton registered via init().
Earlier tests that create anonymous identities pollute the shared store,
causing LookupAnonymous() to unexpectedly return true.

Fix by calling Reset() on the memory store before the test runs.
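
A rough sketch of that fix, assuming a package-level accessor for the singleton (the accessor name below is hypothetical; only the test name and the Reset() call come from this change):

func TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey(t *testing.T) {
	// The MemoryStore is a process-wide singleton registered via init(), so
	// identities created by earlier tests can leak into this one. Clear it first.
	getMemoryStoreForTest().Reset() // hypothetical accessor; Reset() is the call this fix adds

	// ... existing test body unchanged ...
}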

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

The Plugin.sendToWorker method could panic with "send on closed channel"
when a worker disconnected while a message was being sent. The race was
between streamSession.close() closing the outgoing channel and sendToWorker
writing to it concurrently.

Add a done channel to streamSession that is closed before the outgoing
channel, and check it in sendToWorker's select to safely detect closed
sessions without panicking.
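
A minimal skeleton of that pattern, with simplified types (only streamSession, sendToWorker, and the done/outgoing channels are named by this change; everything else here is illustrative):

package plugin

import "errors"

// WorkerMessage stands in for the real message type sent to a worker.
type WorkerMessage struct{ Payload []byte }

type streamSession struct {
	outgoing chan *WorkerMessage // messages bound for the worker stream
	done     chan struct{}       // closed before outgoing to signal shutdown
}

func (s *streamSession) close() {
	close(s.done)     // signal shutdown first so senders can observe it
	close(s.outgoing) // then close the stream channel
}

// send mirrors the sendToWorker check: selecting on done lets callers detect a
// closing session instead of blindly writing to a channel that may be closed.
func (s *streamSession) send(msg *WorkerMessage) error {
	select {
	case <-s.done:
		return errors.New("worker session closed")
	case s.outgoing <- msg:
		return nil
	}
}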

Author: Chris Lu (committed by GitHub)
Date:   2026-04-03 16:04:27 -07:00
Parent: 8fad85aed7
Commit: 995dfc4d5d

264 changed files with 62 additions and 46027 deletions


@@ -117,11 +117,6 @@ func GetBrokerErrorInfo(code int32) BrokerErrorInfo {
	}
}

// GetKafkaErrorCode returns the corresponding Kafka protocol error code for a broker error
func GetKafkaErrorCode(brokerErrorCode int32) int16 {
	return GetBrokerErrorInfo(brokerErrorCode).KafkaCode
}

// CreateBrokerError creates a structured broker error with both error code and message
func CreateBrokerError(code int32, message string) (int32, string) {
	info := GetBrokerErrorInfo(code)


@@ -1,351 +0,0 @@
package broker

import (
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

func createTestTopic() topic.Topic {
	return topic.Topic{
		Namespace: "test",
		Name:      "offset-test",
	}
}

func createTestPartition() topic.Partition {
	return topic.Partition{
		RingSize:   1024,
		RangeStart: 0,
		RangeStop:  31,
		UnixTimeNs: time.Now().UnixNano(),
	}
}

func TestBrokerOffsetManager_AssignOffset(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Test sequential offset assignment
	for i := int64(0); i < 10; i++ {
		assignedOffset, err := manager.AssignOffset(testTopic, testPartition)
		if err != nil {
			t.Fatalf("Failed to assign offset %d: %v", i, err)
		}
		if assignedOffset != i {
			t.Errorf("Expected offset %d, got %d", i, assignedOffset)
		}
	}
}

func TestBrokerOffsetManager_AssignBatchOffsets(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign batch of offsets
	baseOffset, lastOffset, err := manager.AssignBatchOffsets(testTopic, testPartition, 5)
	if err != nil {
		t.Fatalf("Failed to assign batch offsets: %v", err)
	}
	if baseOffset != 0 {
		t.Errorf("Expected base offset 0, got %d", baseOffset)
	}
	if lastOffset != 4 {
		t.Errorf("Expected last offset 4, got %d", lastOffset)
	}

	// Assign another batch
	baseOffset2, lastOffset2, err := manager.AssignBatchOffsets(testTopic, testPartition, 3)
	if err != nil {
		t.Fatalf("Failed to assign second batch offsets: %v", err)
	}
	if baseOffset2 != 5 {
		t.Errorf("Expected base offset 5, got %d", baseOffset2)
	}
	if lastOffset2 != 7 {
		t.Errorf("Expected last offset 7, got %d", lastOffset2)
	}
}

func TestBrokerOffsetManager_GetHighWaterMark(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Initially should be 0
	hwm, err := manager.GetHighWaterMark(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get initial high water mark: %v", err)
	}
	if hwm != 0 {
		t.Errorf("Expected initial high water mark 0, got %d", hwm)
	}

	// Assign some offsets
	manager.AssignBatchOffsets(testTopic, testPartition, 10)

	// High water mark should be updated
	hwm, err = manager.GetHighWaterMark(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get high water mark after assignment: %v", err)
	}
	if hwm != 10 {
		t.Errorf("Expected high water mark 10, got %d", hwm)
	}
}

func TestBrokerOffsetManager_CreateSubscription(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign some offsets first
	manager.AssignBatchOffsets(testTopic, testPartition, 5)

	// Create subscription
	sub, err := manager.CreateSubscription(
		"test-sub",
		testTopic,
		testPartition,
		schema_pb.OffsetType_RESET_TO_EARLIEST,
		0,
	)
	if err != nil {
		t.Fatalf("Failed to create subscription: %v", err)
	}
	if sub.ID != "test-sub" {
		t.Errorf("Expected subscription ID 'test-sub', got %s", sub.ID)
	}
	if sub.StartOffset != 0 {
		t.Errorf("Expected start offset 0, got %d", sub.StartOffset)
	}
}

func TestBrokerOffsetManager_GetPartitionOffsetInfo(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Test empty partition
	info, err := manager.GetPartitionOffsetInfo(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get partition offset info: %v", err)
	}
	if info.EarliestOffset != 0 {
		t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset)
	}
	if info.LatestOffset != -1 {
		t.Errorf("Expected latest offset -1 for empty partition, got %d", info.LatestOffset)
	}

	// Assign offsets and test again
	manager.AssignBatchOffsets(testTopic, testPartition, 5)
	info, err = manager.GetPartitionOffsetInfo(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get partition offset info after assignment: %v", err)
	}
	if info.LatestOffset != 4 {
		t.Errorf("Expected latest offset 4, got %d", info.LatestOffset)
	}
	if info.HighWaterMark != 5 {
		t.Errorf("Expected high water mark 5, got %d", info.HighWaterMark)
	}
}

func TestBrokerOffsetManager_MultiplePartitions(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()

	// Create different partitions
	partition1 := topic.Partition{
		RingSize:   1024,
		RangeStart: 0,
		RangeStop:  31,
		UnixTimeNs: time.Now().UnixNano(),
	}
	partition2 := topic.Partition{
		RingSize:   1024,
		RangeStart: 32,
		RangeStop:  63,
		UnixTimeNs: time.Now().UnixNano(),
	}

	// Assign offsets to different partitions
	assignedOffset1, err := manager.AssignOffset(testTopic, partition1)
	if err != nil {
		t.Fatalf("Failed to assign offset to partition1: %v", err)
	}
	assignedOffset2, err := manager.AssignOffset(testTopic, partition2)
	if err != nil {
		t.Fatalf("Failed to assign offset to partition2: %v", err)
	}

	// Both should start at 0
	if assignedOffset1 != 0 {
		t.Errorf("Expected offset 0 for partition1, got %d", assignedOffset1)
	}
	if assignedOffset2 != 0 {
		t.Errorf("Expected offset 0 for partition2, got %d", assignedOffset2)
	}

	// Assign more offsets to partition1
	assignedOffset1_2, err := manager.AssignOffset(testTopic, partition1)
	if err != nil {
		t.Fatalf("Failed to assign second offset to partition1: %v", err)
	}
	if assignedOffset1_2 != 1 {
		t.Errorf("Expected offset 1 for partition1, got %d", assignedOffset1_2)
	}

	// Partition2 should still be at 0 for next assignment
	assignedOffset2_2, err := manager.AssignOffset(testTopic, partition2)
	if err != nil {
		t.Fatalf("Failed to assign second offset to partition2: %v", err)
	}
	if assignedOffset2_2 != 1 {
		t.Errorf("Expected offset 1 for partition2, got %d", assignedOffset2_2)
	}
}

func TestOffsetAwarePublisher(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Create a mock local partition (simplified for testing)
	localPartition := &topic.LocalPartition{}

	// Create offset assignment function
	assignOffsetFn := func() (int64, error) {
		return manager.AssignOffset(testTopic, testPartition)
	}

	// Create offset-aware publisher
	publisher := topic.NewOffsetAwarePublisher(localPartition, assignOffsetFn)
	if publisher.GetPartition() != localPartition {
		t.Error("Publisher should return the correct partition")
	}

	// Test would require more setup to actually publish messages
	// This tests the basic structure
}

func TestBrokerOffsetManager_GetOffsetMetrics(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Initial metrics
	metrics := manager.GetOffsetMetrics()
	if metrics.TotalOffsets != 0 {
		t.Errorf("Expected 0 total offsets initially, got %d", metrics.TotalOffsets)
	}

	// Assign some offsets
	manager.AssignBatchOffsets(testTopic, testPartition, 5)

	// Create subscription
	manager.CreateSubscription("test-sub", testTopic, testPartition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)

	// Check updated metrics
	metrics = manager.GetOffsetMetrics()
	if metrics.PartitionCount != 1 {
		t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount)
	}
}

func TestBrokerOffsetManager_AssignOffsetsWithResult(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign offsets with result
	result := manager.AssignOffsetsWithResult(testTopic, testPartition, 3)
	if result.Error != nil {
		t.Fatalf("Expected no error, got: %v", result.Error)
	}
	if result.BaseOffset != 0 {
		t.Errorf("Expected base offset 0, got %d", result.BaseOffset)
	}
	if result.LastOffset != 2 {
		t.Errorf("Expected last offset 2, got %d", result.LastOffset)
	}
	if result.Count != 3 {
		t.Errorf("Expected count 3, got %d", result.Count)
	}
	if result.Topic != testTopic {
		t.Error("Topic mismatch in result")
	}
	if result.Partition != testPartition {
		t.Error("Partition mismatch in result")
	}
	if result.Timestamp <= 0 {
		t.Error("Timestamp should be set")
	}
}

func TestBrokerOffsetManager_Shutdown(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign some offsets and create subscriptions
	manager.AssignBatchOffsets(testTopic, testPartition, 5)
	manager.CreateSubscription("test-sub", testTopic, testPartition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)

	// Shutdown should not panic
	manager.Shutdown()

	// After shutdown, operations should still work (using new managers)
	offset, err := manager.AssignOffset(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Operations should still work after shutdown: %v", err)
	}

	// Should start from 0 again (new manager)
	if offset != 0 {
		t.Errorf("Expected offset 0 after shutdown, got %d", offset)
	}
}


@@ -203,14 +203,6 @@ func (b *MessageQueueBroker) GetDataCenter() string {
}

func (b *MessageQueueBroker) withMasterClient(streamingMode bool, master pb.ServerAddress, fn func(client master_pb.SeaweedClient) error) error {
	return pb.WithMasterClient(streamingMode, master, b.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
		return fn(client)
	})
}

func (b *MessageQueueBroker) withBrokerClient(streamingMode bool, server pb.ServerAddress, fn func(client mq_pb.SeaweedMessagingClient) error) error {
	return pb.WithBrokerGrpcClient(streamingMode, server.String(), b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {