chore: remove ~50k lines of unreachable dead code (#8913)
* chore: remove unreachable dead code across the codebase

  Remove ~50,000 lines of unreachable code identified by static analysis.

  Major removals:
  - weed/filer/redis_lua: entire unused Redis Lua filer store implementation
  - weed/wdclient/net2, resource_pool: unused connection/resource pool packages
  - weed/plugin/worker/lifecycle: unused lifecycle plugin worker
  - weed/s3api: unused S3 policy templates, presigned URL IAM, streaming copy, multipart IAM, key rotation, and various SSE helper functions
  - weed/mq/kafka: unused partition mapping, compression, schema, and protocol functions
  - weed/mq/offset: unused SQL storage and migration code
  - weed/worker: unused registry, task, and monitoring functions
  - weed/query: unused SQL engine, parquet scanner, and type functions
  - weed/shell: unused EC proportional rebalance functions
  - weed/storage/erasure_coding/distribution: unused distribution analysis functions
  - Individual unreachable functions removed from 150+ files across admin, credential, filer, iam, kms, mount, mq, operation, pb, s3api, server, shell, storage, topology, and util packages

* fix(s3): reset shared memory store in IAM test to prevent flaky failure

  TestLoadIAMManagerFromConfig_EmptyConfigWithFallbackKey was flaky because the MemoryStore credential backend is a singleton registered via init(). Earlier tests that create anonymous identities pollute the shared store, causing LookupAnonymous() to unexpectedly return true. Fix by calling Reset() on the memory store before the test runs.

* style: run gofmt on changed files

* fix: restore KMS functions used by integration tests

* fix(plugin): prevent panic on send to closed worker session channel

  The Plugin.sendToWorker method could panic with "send on closed channel" when a worker disconnected while a message was being sent. The race was between streamSession.close() closing the outgoing channel and sendToWorker writing to it concurrently.

  Add a done channel to streamSession that is closed before the outgoing channel, and check it in sendToWorker's select to safely detect closed sessions without panicking.
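  The closed-session guard in the last fix follows a common Go shutdown pattern: close a dedicated done channel first, and have every sender select on it so shutdown is observed as an error instead of a panic. The sketch below is only a minimal illustration of that pattern under assumed names (session, outgoing, done); it is not the actual Plugin/streamSession code from this commit, and unlike the commit it leaves the data channel open and lets the receiver stop via done, which keeps the minimal version race-free.

  package main

  import (
  	"errors"
  	"sync"
  )

  // session is a stand-in for a worker stream: done signals shutdown,
  // outgoing carries messages to the worker.
  type session struct {
  	outgoing chan string
  	done     chan struct{}
  	once     sync.Once
  }

  func newSession() *session {
  	return &session{
  		outgoing: make(chan string, 16),
  		done:     make(chan struct{}),
  	}
  }

  // close signals shutdown exactly once. The data channel is deliberately
  // left open in this sketch; the receiver exits via done and the channel
  // is reclaimed by the garbage collector.
  func (s *session) close() {
  	s.once.Do(func() { close(s.done) })
  }

  // send delivers a message unless the session is already closed, so a
  // concurrent close never turns into "send on closed channel".
  func (s *session) send(msg string) error {
  	select {
  	case <-s.done:
  		return errors.New("session closed")
  	case s.outgoing <- msg:
  		return nil
  	}
  }

  // run drains messages until the session is closed.
  func (s *session) run(handle func(string)) {
  	for {
  		select {
  		case <-s.done:
  			return
  		case msg := <-s.outgoing:
  			handle(msg)
  		}
  	}
  }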
@@ -117,11 +117,6 @@ func GetBrokerErrorInfo(code int32) BrokerErrorInfo {
	}
}

// GetKafkaErrorCode returns the corresponding Kafka protocol error code for a broker error
func GetKafkaErrorCode(brokerErrorCode int32) int16 {
	return GetBrokerErrorInfo(brokerErrorCode).KafkaCode
}

// CreateBrokerError creates a structured broker error with both error code and message
func CreateBrokerError(code int32, message string) (int32, string) {
	info := GetBrokerErrorInfo(code)
@@ -1,351 +0,0 @@
package broker

import (
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

func createTestTopic() topic.Topic {
	return topic.Topic{
		Namespace: "test",
		Name: "offset-test",
	}
}

func createTestPartition() topic.Partition {
	return topic.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}
}

func TestBrokerOffsetManager_AssignOffset(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Test sequential offset assignment
	for i := int64(0); i < 10; i++ {
		assignedOffset, err := manager.AssignOffset(testTopic, testPartition)
		if err != nil {
			t.Fatalf("Failed to assign offset %d: %v", i, err)
		}

		if assignedOffset != i {
			t.Errorf("Expected offset %d, got %d", i, assignedOffset)
		}
	}
}

func TestBrokerOffsetManager_AssignBatchOffsets(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign batch of offsets
	baseOffset, lastOffset, err := manager.AssignBatchOffsets(testTopic, testPartition, 5)
	if err != nil {
		t.Fatalf("Failed to assign batch offsets: %v", err)
	}

	if baseOffset != 0 {
		t.Errorf("Expected base offset 0, got %d", baseOffset)
	}

	if lastOffset != 4 {
		t.Errorf("Expected last offset 4, got %d", lastOffset)
	}

	// Assign another batch
	baseOffset2, lastOffset2, err := manager.AssignBatchOffsets(testTopic, testPartition, 3)
	if err != nil {
		t.Fatalf("Failed to assign second batch offsets: %v", err)
	}

	if baseOffset2 != 5 {
		t.Errorf("Expected base offset 5, got %d", baseOffset2)
	}

	if lastOffset2 != 7 {
		t.Errorf("Expected last offset 7, got %d", lastOffset2)
	}
}

func TestBrokerOffsetManager_GetHighWaterMark(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Initially should be 0
	hwm, err := manager.GetHighWaterMark(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get initial high water mark: %v", err)
	}

	if hwm != 0 {
		t.Errorf("Expected initial high water mark 0, got %d", hwm)
	}

	// Assign some offsets
	manager.AssignBatchOffsets(testTopic, testPartition, 10)

	// High water mark should be updated
	hwm, err = manager.GetHighWaterMark(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get high water mark after assignment: %v", err)
	}

	if hwm != 10 {
		t.Errorf("Expected high water mark 10, got %d", hwm)
	}
}

func TestBrokerOffsetManager_CreateSubscription(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign some offsets first
	manager.AssignBatchOffsets(testTopic, testPartition, 5)

	// Create subscription
	sub, err := manager.CreateSubscription(
		"test-sub",
		testTopic,
		testPartition,
		schema_pb.OffsetType_RESET_TO_EARLIEST,
		0,
	)

	if err != nil {
		t.Fatalf("Failed to create subscription: %v", err)
	}

	if sub.ID != "test-sub" {
		t.Errorf("Expected subscription ID 'test-sub', got %s", sub.ID)
	}

	if sub.StartOffset != 0 {
		t.Errorf("Expected start offset 0, got %d", sub.StartOffset)
	}
}

func TestBrokerOffsetManager_GetPartitionOffsetInfo(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Test empty partition
	info, err := manager.GetPartitionOffsetInfo(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get partition offset info: %v", err)
	}

	if info.EarliestOffset != 0 {
		t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset)
	}

	if info.LatestOffset != -1 {
		t.Errorf("Expected latest offset -1 for empty partition, got %d", info.LatestOffset)
	}

	// Assign offsets and test again
	manager.AssignBatchOffsets(testTopic, testPartition, 5)

	info, err = manager.GetPartitionOffsetInfo(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Failed to get partition offset info after assignment: %v", err)
	}

	if info.LatestOffset != 4 {
		t.Errorf("Expected latest offset 4, got %d", info.LatestOffset)
	}

	if info.HighWaterMark != 5 {
		t.Errorf("Expected high water mark 5, got %d", info.HighWaterMark)
	}
}

func TestBrokerOffsetManager_MultiplePartitions(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()

	// Create different partitions
	partition1 := topic.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	partition2 := topic.Partition{
		RingSize: 1024,
		RangeStart: 32,
		RangeStop: 63,
		UnixTimeNs: time.Now().UnixNano(),
	}

	// Assign offsets to different partitions
	assignedOffset1, err := manager.AssignOffset(testTopic, partition1)
	if err != nil {
		t.Fatalf("Failed to assign offset to partition1: %v", err)
	}

	assignedOffset2, err := manager.AssignOffset(testTopic, partition2)
	if err != nil {
		t.Fatalf("Failed to assign offset to partition2: %v", err)
	}

	// Both should start at 0
	if assignedOffset1 != 0 {
		t.Errorf("Expected offset 0 for partition1, got %d", assignedOffset1)
	}

	if assignedOffset2 != 0 {
		t.Errorf("Expected offset 0 for partition2, got %d", assignedOffset2)
	}

	// Assign more offsets to partition1
	assignedOffset1_2, err := manager.AssignOffset(testTopic, partition1)
	if err != nil {
		t.Fatalf("Failed to assign second offset to partition1: %v", err)
	}

	if assignedOffset1_2 != 1 {
		t.Errorf("Expected offset 1 for partition1, got %d", assignedOffset1_2)
	}

	// Partition2 should still be at 0 for next assignment
	assignedOffset2_2, err := manager.AssignOffset(testTopic, partition2)
	if err != nil {
		t.Fatalf("Failed to assign second offset to partition2: %v", err)
	}

	if assignedOffset2_2 != 1 {
		t.Errorf("Expected offset 1 for partition2, got %d", assignedOffset2_2)
	}
}

func TestOffsetAwarePublisher(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Create a mock local partition (simplified for testing)
	localPartition := &topic.LocalPartition{}

	// Create offset assignment function
	assignOffsetFn := func() (int64, error) {
		return manager.AssignOffset(testTopic, testPartition)
	}

	// Create offset-aware publisher
	publisher := topic.NewOffsetAwarePublisher(localPartition, assignOffsetFn)

	if publisher.GetPartition() != localPartition {
		t.Error("Publisher should return the correct partition")
	}

	// Test would require more setup to actually publish messages
	// This tests the basic structure
}

func TestBrokerOffsetManager_GetOffsetMetrics(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Initial metrics
	metrics := manager.GetOffsetMetrics()
	if metrics.TotalOffsets != 0 {
		t.Errorf("Expected 0 total offsets initially, got %d", metrics.TotalOffsets)
	}

	// Assign some offsets
	manager.AssignBatchOffsets(testTopic, testPartition, 5)

	// Create subscription
	manager.CreateSubscription("test-sub", testTopic, testPartition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)

	// Check updated metrics
	metrics = manager.GetOffsetMetrics()
	if metrics.PartitionCount != 1 {
		t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount)
	}
}

func TestBrokerOffsetManager_AssignOffsetsWithResult(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign offsets with result
	result := manager.AssignOffsetsWithResult(testTopic, testPartition, 3)

	if result.Error != nil {
		t.Fatalf("Expected no error, got: %v", result.Error)
	}

	if result.BaseOffset != 0 {
		t.Errorf("Expected base offset 0, got %d", result.BaseOffset)
	}

	if result.LastOffset != 2 {
		t.Errorf("Expected last offset 2, got %d", result.LastOffset)
	}

	if result.Count != 3 {
		t.Errorf("Expected count 3, got %d", result.Count)
	}

	if result.Topic != testTopic {
		t.Error("Topic mismatch in result")
	}

	if result.Partition != testPartition {
		t.Error("Partition mismatch in result")
	}

	if result.Timestamp <= 0 {
		t.Error("Timestamp should be set")
	}
}

func TestBrokerOffsetManager_Shutdown(t *testing.T) {
	storage := NewInMemoryOffsetStorageForTesting()
	manager := NewBrokerOffsetManagerWithStorage(storage)
	testTopic := createTestTopic()
	testPartition := createTestPartition()

	// Assign some offsets and create subscriptions
	manager.AssignBatchOffsets(testTopic, testPartition, 5)
	manager.CreateSubscription("test-sub", testTopic, testPartition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)

	// Shutdown should not panic
	manager.Shutdown()

	// After shutdown, operations should still work (using new managers)
	offset, err := manager.AssignOffset(testTopic, testPartition)
	if err != nil {
		t.Fatalf("Operations should still work after shutdown: %v", err)
	}

	// Should start from 0 again (new manager)
	if offset != 0 {
		t.Errorf("Expected offset 0 after shutdown, got %d", offset)
	}
}
@@ -203,14 +203,6 @@ func (b *MessageQueueBroker) GetDataCenter() string {
}

func (b *MessageQueueBroker) withMasterClient(streamingMode bool, master pb.ServerAddress, fn func(client master_pb.SeaweedClient) error) error {
	return pb.WithMasterClient(streamingMode, master, b.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
		return fn(client)
	})
}

func (b *MessageQueueBroker) withBrokerClient(streamingMode bool, server pb.ServerAddress, fn func(client mq_pb.SeaweedMessagingClient) error) error {
	return pb.WithBrokerGrpcClient(streamingMode, server.String(), b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
@@ -5,7 +5,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/filer_client"
@@ -192,10 +191,6 @@ func (f *FilerStorage) getOffsetPath(group, topic string, partition int32) strin
	return fmt.Sprintf("%s/offset", f.getPartitionPath(group, topic, partition))
}

func (f *FilerStorage) getMetadataPath(group, topic string, partition int32) string {
	return fmt.Sprintf("%s/metadata", f.getPartitionPath(group, topic, partition))
}

func (f *FilerStorage) writeFile(path string, data []byte) error {
	fullPath := util.FullPath(path)
	dir, name := fullPath.DirAndName()
@@ -311,16 +306,3 @@ func (f *FilerStorage) deleteDirectory(path string) error {
		return err
	})
}

// normalizePath removes leading/trailing slashes and collapses multiple slashes
func normalizePath(path string) string {
	path = strings.Trim(path, "/")
	parts := strings.Split(path, "/")
	normalized := []string{}
	for _, part := range parts {
		if part != "" {
			normalized = append(normalized, part)
		}
	}
	return "/" + strings.Join(normalized, "/")
}
@@ -1,65 +0,0 @@
package consumer_offset

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Note: These tests require a running filer instance
// They are marked as integration tests and should be run with:
// go test -tags=integration

func TestFilerStorageCommitAndFetch(t *testing.T) {
	t.Skip("Requires running filer - integration test")

	// This will be implemented once we have test infrastructure
	// Test will:
	// 1. Create filer storage
	// 2. Commit offset
	// 3. Fetch offset
	// 4. Verify values match
}

func TestFilerStoragePersistence(t *testing.T) {
	t.Skip("Requires running filer - integration test")

	// Test will:
	// 1. Commit offset with first storage instance
	// 2. Close first instance
	// 3. Create new storage instance
	// 4. Fetch offset and verify it persisted
}

func TestFilerStorageMultipleGroups(t *testing.T) {
	t.Skip("Requires running filer - integration test")

	// Test will:
	// 1. Commit offsets for multiple groups
	// 2. Fetch all offsets per group
	// 3. Verify isolation between groups
}

func TestFilerStoragePath(t *testing.T) {
	// Test path generation (doesn't require filer)
	storage := &FilerStorage{}

	group := "test-group"
	topic := "test-topic"
	partition := int32(5)

	groupPath := storage.getGroupPath(group)
	assert.Equal(t, ConsumerOffsetsBasePath+"/test-group", groupPath)

	topicPath := storage.getTopicPath(group, topic)
	assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic", topicPath)

	partitionPath := storage.getPartitionPath(group, topic, partition)
	assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic/5", partitionPath)

	offsetPath := storage.getOffsetPath(group, topic, partition)
	assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic/5/offset", offsetPath)

	metadataPath := storage.getMetadataPath(group, topic, partition)
	assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic/5/metadata", metadataPath)
}
@@ -278,38 +278,3 @@ func (h *SeaweedMQHandler) checkTopicInFiler(topicName string) bool {
	return exists
}

// listTopicsFromFiler lists all topics from the filer
func (h *SeaweedMQHandler) listTopicsFromFiler() []string {
	if h.filerClientAccessor == nil {
		return []string{}
	}

	var topics []string

	h.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		request := &filer_pb.ListEntriesRequest{
			Directory: "/topics/kafka",
		}

		stream, err := client.ListEntries(context.Background(), request)
		if err != nil {
			return nil // Don't propagate error, just return empty list
		}

		for {
			resp, err := stream.Recv()
			if err != nil {
				break // End of stream or error
			}

			if resp.Entry != nil && resp.Entry.IsDirectory {
				topics = append(topics, resp.Entry.Name)
			} else if resp.Entry != nil {
			}
		}
		return nil
	})

	return topics
}
@@ -1,53 +0,0 @@
package kafka

import (
	"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

// Convenience functions for partition mapping used by production code
// The full PartitionMapper implementation is in partition_mapping_test.go for testing

// MapKafkaPartitionToSMQRange maps a Kafka partition to SeaweedMQ ring range
func MapKafkaPartitionToSMQRange(kafkaPartition int32) (rangeStart, rangeStop int32) {
	// Use a range size that divides evenly into MaxPartitionCount (2520)
	// Range size 35 gives us exactly 72 Kafka partitions: 2520 / 35 = 72
	rangeSize := int32(35)
	rangeStart = kafkaPartition * rangeSize
	rangeStop = rangeStart + rangeSize - 1
	return rangeStart, rangeStop
}

// CreateSMQPartition creates a SeaweedMQ partition from a Kafka partition
func CreateSMQPartition(kafkaPartition int32, unixTimeNs int64) *schema_pb.Partition {
	rangeStart, rangeStop := MapKafkaPartitionToSMQRange(kafkaPartition)

	return &schema_pb.Partition{
		RingSize: pub_balancer.MaxPartitionCount,
		RangeStart: rangeStart,
		RangeStop: rangeStop,
		UnixTimeNs: unixTimeNs,
	}
}

// ExtractKafkaPartitionFromSMQRange extracts the Kafka partition from SeaweedMQ range
func ExtractKafkaPartitionFromSMQRange(rangeStart int32) int32 {
	rangeSize := int32(35)
	return rangeStart / rangeSize
}

// ValidateKafkaPartition validates that a Kafka partition is within supported range
func ValidateKafkaPartition(kafkaPartition int32) bool {
	maxPartitions := int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions
	return kafkaPartition >= 0 && kafkaPartition < maxPartitions
}

// GetRangeSize returns the range size used for partition mapping
func GetRangeSize() int32 {
	return 35
}

// GetMaxKafkaPartitions returns the maximum number of Kafka partitions supported
func GetMaxKafkaPartitions() int32 {
	return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions
}
@@ -1,294 +0,0 @@
package kafka

import (
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

// PartitionMapper provides consistent Kafka partition to SeaweedMQ ring mapping
// NOTE: This is test-only code and not used in the actual Kafka Gateway implementation
type PartitionMapper struct{}

// NewPartitionMapper creates a new partition mapper
func NewPartitionMapper() *PartitionMapper {
	return &PartitionMapper{}
}

// GetRangeSize returns the consistent range size for Kafka partition mapping
// This ensures all components use the same calculation
func (pm *PartitionMapper) GetRangeSize() int32 {
	// Use a range size that divides evenly into MaxPartitionCount (2520)
	// Range size 35 gives us exactly 72 Kafka partitions: 2520 / 35 = 72
	// This provides a good balance between partition granularity and ring utilization
	return 35
}

// GetMaxKafkaPartitions returns the maximum number of Kafka partitions supported
func (pm *PartitionMapper) GetMaxKafkaPartitions() int32 {
	// With range size 35, we can support: 2520 / 35 = 72 Kafka partitions
	return int32(pub_balancer.MaxPartitionCount) / pm.GetRangeSize()
}

// MapKafkaPartitionToSMQRange maps a Kafka partition to SeaweedMQ ring range
func (pm *PartitionMapper) MapKafkaPartitionToSMQRange(kafkaPartition int32) (rangeStart, rangeStop int32) {
	rangeSize := pm.GetRangeSize()
	rangeStart = kafkaPartition * rangeSize
	rangeStop = rangeStart + rangeSize - 1
	return rangeStart, rangeStop
}

// CreateSMQPartition creates a SeaweedMQ partition from a Kafka partition
func (pm *PartitionMapper) CreateSMQPartition(kafkaPartition int32, unixTimeNs int64) *schema_pb.Partition {
	rangeStart, rangeStop := pm.MapKafkaPartitionToSMQRange(kafkaPartition)

	return &schema_pb.Partition{
		RingSize: pub_balancer.MaxPartitionCount,
		RangeStart: rangeStart,
		RangeStop: rangeStop,
		UnixTimeNs: unixTimeNs,
	}
}

// ExtractKafkaPartitionFromSMQRange extracts the Kafka partition from SeaweedMQ range
func (pm *PartitionMapper) ExtractKafkaPartitionFromSMQRange(rangeStart int32) int32 {
	rangeSize := pm.GetRangeSize()
	return rangeStart / rangeSize
}

// ValidateKafkaPartition validates that a Kafka partition is within supported range
func (pm *PartitionMapper) ValidateKafkaPartition(kafkaPartition int32) bool {
	return kafkaPartition >= 0 && kafkaPartition < pm.GetMaxKafkaPartitions()
}

// GetPartitionMappingInfo returns debug information about the partition mapping
func (pm *PartitionMapper) GetPartitionMappingInfo() map[string]interface{} {
	return map[string]interface{}{
		"ring_size": pub_balancer.MaxPartitionCount,
		"range_size": pm.GetRangeSize(),
		"max_kafka_partitions": pm.GetMaxKafkaPartitions(),
		"ring_utilization": float64(pm.GetMaxKafkaPartitions()*pm.GetRangeSize()) / float64(pub_balancer.MaxPartitionCount),
	}
}

// Global instance for consistent usage across the test codebase
var DefaultPartitionMapper = NewPartitionMapper()

func TestPartitionMapper_GetRangeSize(t *testing.T) {
	mapper := NewPartitionMapper()
	rangeSize := mapper.GetRangeSize()

	if rangeSize != 35 {
		t.Errorf("Expected range size 35, got %d", rangeSize)
	}

	// Verify that the range size divides evenly into available partitions
	maxPartitions := mapper.GetMaxKafkaPartitions()
	totalUsed := maxPartitions * rangeSize

	if totalUsed > int32(pub_balancer.MaxPartitionCount) {
		t.Errorf("Total used slots (%d) exceeds MaxPartitionCount (%d)", totalUsed, pub_balancer.MaxPartitionCount)
	}

	t.Logf("Range size: %d, Max Kafka partitions: %d, Ring utilization: %.2f%%",
		rangeSize, maxPartitions, float64(totalUsed)/float64(pub_balancer.MaxPartitionCount)*100)
}

func TestPartitionMapper_MapKafkaPartitionToSMQRange(t *testing.T) {
	mapper := NewPartitionMapper()

	tests := []struct {
		kafkaPartition int32
		expectedStart int32
		expectedStop int32
	}{
		{0, 0, 34},
		{1, 35, 69},
		{2, 70, 104},
		{10, 350, 384},
	}

	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			start, stop := mapper.MapKafkaPartitionToSMQRange(tt.kafkaPartition)

			if start != tt.expectedStart {
				t.Errorf("Kafka partition %d: expected start %d, got %d", tt.kafkaPartition, tt.expectedStart, start)
			}

			if stop != tt.expectedStop {
				t.Errorf("Kafka partition %d: expected stop %d, got %d", tt.kafkaPartition, tt.expectedStop, stop)
			}

			// Verify range size is consistent
			rangeSize := stop - start + 1
			if rangeSize != mapper.GetRangeSize() {
				t.Errorf("Inconsistent range size: expected %d, got %d", mapper.GetRangeSize(), rangeSize)
			}
		})
	}
}

func TestPartitionMapper_ExtractKafkaPartitionFromSMQRange(t *testing.T) {
	mapper := NewPartitionMapper()

	tests := []struct {
		rangeStart int32
		expectedKafka int32
	}{
		{0, 0},
		{35, 1},
		{70, 2},
		{350, 10},
	}

	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			kafkaPartition := mapper.ExtractKafkaPartitionFromSMQRange(tt.rangeStart)

			if kafkaPartition != tt.expectedKafka {
				t.Errorf("Range start %d: expected Kafka partition %d, got %d",
					tt.rangeStart, tt.expectedKafka, kafkaPartition)
			}
		})
	}
}

func TestPartitionMapper_RoundTrip(t *testing.T) {
	mapper := NewPartitionMapper()

	// Test round-trip conversion for all valid Kafka partitions
	maxPartitions := mapper.GetMaxKafkaPartitions()

	for kafkaPartition := int32(0); kafkaPartition < maxPartitions; kafkaPartition++ {
		// Kafka -> SMQ -> Kafka
		rangeStart, rangeStop := mapper.MapKafkaPartitionToSMQRange(kafkaPartition)
		extractedKafka := mapper.ExtractKafkaPartitionFromSMQRange(rangeStart)

		if extractedKafka != kafkaPartition {
			t.Errorf("Round-trip failed for partition %d: got %d", kafkaPartition, extractedKafka)
		}

		// Verify no overlap with next partition
		if kafkaPartition < maxPartitions-1 {
			nextStart, _ := mapper.MapKafkaPartitionToSMQRange(kafkaPartition + 1)
			if rangeStop >= nextStart {
				t.Errorf("Partition %d range [%d,%d] overlaps with partition %d start %d",
					kafkaPartition, rangeStart, rangeStop, kafkaPartition+1, nextStart)
			}
		}
	}
}

func TestPartitionMapper_CreateSMQPartition(t *testing.T) {
	mapper := NewPartitionMapper()

	kafkaPartition := int32(5)
	unixTimeNs := time.Now().UnixNano()

	partition := mapper.CreateSMQPartition(kafkaPartition, unixTimeNs)

	if partition.RingSize != pub_balancer.MaxPartitionCount {
		t.Errorf("Expected ring size %d, got %d", pub_balancer.MaxPartitionCount, partition.RingSize)
	}

	expectedStart, expectedStop := mapper.MapKafkaPartitionToSMQRange(kafkaPartition)
	if partition.RangeStart != expectedStart {
		t.Errorf("Expected range start %d, got %d", expectedStart, partition.RangeStart)
	}

	if partition.RangeStop != expectedStop {
		t.Errorf("Expected range stop %d, got %d", expectedStop, partition.RangeStop)
	}

	if partition.UnixTimeNs != unixTimeNs {
		t.Errorf("Expected timestamp %d, got %d", unixTimeNs, partition.UnixTimeNs)
	}
}

func TestPartitionMapper_ValidateKafkaPartition(t *testing.T) {
	mapper := NewPartitionMapper()

	tests := []struct {
		partition int32
		valid bool
	}{
		{-1, false},
		{0, true},
		{1, true},
		{mapper.GetMaxKafkaPartitions() - 1, true},
		{mapper.GetMaxKafkaPartitions(), false},
		{1000, false},
	}

	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			valid := mapper.ValidateKafkaPartition(tt.partition)
			if valid != tt.valid {
				t.Errorf("Partition %d: expected valid=%v, got %v", tt.partition, tt.valid, valid)
			}
		})
	}
}

func TestPartitionMapper_ConsistencyWithGlobalFunctions(t *testing.T) {
	mapper := NewPartitionMapper()

	kafkaPartition := int32(7)
	unixTimeNs := time.Now().UnixNano()

	// Test that global functions produce same results as mapper methods
	start1, stop1 := mapper.MapKafkaPartitionToSMQRange(kafkaPartition)
	start2, stop2 := MapKafkaPartitionToSMQRange(kafkaPartition)

	if start1 != start2 || stop1 != stop2 {
		t.Errorf("Global function inconsistent: mapper=(%d,%d), global=(%d,%d)",
			start1, stop1, start2, stop2)
	}

	partition1 := mapper.CreateSMQPartition(kafkaPartition, unixTimeNs)
	partition2 := CreateSMQPartition(kafkaPartition, unixTimeNs)

	if partition1.RangeStart != partition2.RangeStart || partition1.RangeStop != partition2.RangeStop {
		t.Errorf("Global CreateSMQPartition inconsistent")
	}

	extracted1 := mapper.ExtractKafkaPartitionFromSMQRange(start1)
	extracted2 := ExtractKafkaPartitionFromSMQRange(start1)

	if extracted1 != extracted2 {
		t.Errorf("Global ExtractKafkaPartitionFromSMQRange inconsistent: %d vs %d", extracted1, extracted2)
	}
}

func TestPartitionMapper_GetPartitionMappingInfo(t *testing.T) {
	mapper := NewPartitionMapper()

	info := mapper.GetPartitionMappingInfo()

	// Verify all expected keys are present
	expectedKeys := []string{"ring_size", "range_size", "max_kafka_partitions", "ring_utilization"}
	for _, key := range expectedKeys {
		if _, exists := info[key]; !exists {
			t.Errorf("Missing key in mapping info: %s", key)
		}
	}

	// Verify values are reasonable
	if info["ring_size"].(int) != pub_balancer.MaxPartitionCount {
		t.Errorf("Incorrect ring_size in info")
	}

	if info["range_size"].(int32) != mapper.GetRangeSize() {
		t.Errorf("Incorrect range_size in info")
	}

	utilization := info["ring_utilization"].(float64)
	if utilization <= 0 || utilization > 1 {
		t.Errorf("Invalid ring utilization: %f", utilization)
	}

	t.Logf("Partition mapping info: %+v", info)
}
@@ -2,11 +2,9 @@ package offset
import (
	"fmt"
	"os"
	"testing"
	"time"

	_ "github.com/mattn/go-sqlite3"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)
@@ -62,151 +60,6 @@ func BenchmarkBatchOffsetAssignment(b *testing.B) {
	}
}

// BenchmarkSQLOffsetStorage benchmarks SQL storage operations
func BenchmarkSQLOffsetStorage(b *testing.B) {
	// Create temporary database
	tmpFile, err := os.CreateTemp("", "benchmark_*.db")
	if err != nil {
		b.Fatalf("Failed to create temp database: %v", err)
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())

	db, err := CreateDatabase(tmpFile.Name())
	if err != nil {
		b.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	storage, err := NewSQLOffsetStorage(db)
	if err != nil {
		b.Fatalf("Failed to create SQL storage: %v", err)
	}
	defer storage.Close()

	partition := &schema_pb.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	partitionKey := partitionKey(partition)

	b.Run("SaveCheckpoint", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			storage.SaveCheckpoint("test-namespace", "test-topic", partition, int64(i))
		}
	})

	b.Run("LoadCheckpoint", func(b *testing.B) {
		storage.SaveCheckpoint("test-namespace", "test-topic", partition, 1000)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			storage.LoadCheckpoint("test-namespace", "test-topic", partition)
		}
	})

	b.Run("SaveOffsetMapping", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			storage.SaveOffsetMapping(partitionKey, int64(i), int64(i*1000), 100)
		}
	})

	// Pre-populate for read benchmarks
	for i := 0; i < 1000; i++ {
		storage.SaveOffsetMapping(partitionKey, int64(i), int64(i*1000), 100)
	}

	b.Run("GetHighestOffset", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			storage.GetHighestOffset("test-namespace", "test-topic", partition)
		}
	})

	b.Run("LoadOffsetMappings", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			storage.LoadOffsetMappings(partitionKey)
		}
	})

	b.Run("GetOffsetMappingsByRange", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			start := int64(i % 900)
			end := start + 100
			storage.GetOffsetMappingsByRange(partitionKey, start, end)
		}
	})

	b.Run("GetPartitionStats", func(b *testing.B) {
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			storage.GetPartitionStats(partitionKey)
		}
	})
}

// BenchmarkInMemoryVsSQL compares in-memory and SQL storage performance
func BenchmarkInMemoryVsSQL(b *testing.B) {
	partition := &schema_pb.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	// In-memory storage benchmark
	b.Run("InMemory", func(b *testing.B) {
		storage := NewInMemoryOffsetStorage()
		manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
		if err != nil {
			b.Fatalf("Failed to create partition manager: %v", err)
		}

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			manager.AssignOffset()
		}
	})

	// SQL storage benchmark
	b.Run("SQL", func(b *testing.B) {
		tmpFile, err := os.CreateTemp("", "benchmark_sql_*.db")
		if err != nil {
			b.Fatalf("Failed to create temp database: %v", err)
		}
		tmpFile.Close()
		defer os.Remove(tmpFile.Name())

		db, err := CreateDatabase(tmpFile.Name())
		if err != nil {
			b.Fatalf("Failed to create database: %v", err)
		}
		defer db.Close()

		storage, err := NewSQLOffsetStorage(db)
		if err != nil {
			b.Fatalf("Failed to create SQL storage: %v", err)
		}
		defer storage.Close()

		manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
		if err != nil {
			b.Fatalf("Failed to create partition manager: %v", err)
		}

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			manager.AssignOffset()
		}
	})
}

// BenchmarkOffsetSubscription benchmarks subscription operations
func BenchmarkOffsetSubscription(b *testing.B) {
	storage := NewInMemoryOffsetStorage()
@@ -1,473 +0,0 @@
package offset

import (
	"fmt"
	"os"
	"testing"
	"time"

	_ "github.com/mattn/go-sqlite3"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

// TestEndToEndOffsetFlow tests the complete offset management flow
func TestEndToEndOffsetFlow(t *testing.T) {
	// Create temporary database
	tmpFile, err := os.CreateTemp("", "e2e_offset_test_*.db")
	if err != nil {
		t.Fatalf("Failed to create temp database: %v", err)
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())

	// Create database with migrations
	db, err := CreateDatabase(tmpFile.Name())
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	// Create SQL storage
	storage, err := NewSQLOffsetStorage(db)
	if err != nil {
		t.Fatalf("Failed to create SQL storage: %v", err)
	}
	defer storage.Close()

	// Create SMQ offset integration
	integration := NewSMQOffsetIntegration(storage)

	// Test partition
	partition := &schema_pb.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	t.Run("PublishAndAssignOffsets", func(t *testing.T) {
		// Simulate publishing messages with offset assignment
		records := []PublishRecordRequest{
			{Key: []byte("user1"), Value: &schema_pb.RecordValue{}},
			{Key: []byte("user2"), Value: &schema_pb.RecordValue{}},
			{Key: []byte("user3"), Value: &schema_pb.RecordValue{}},
		}

		response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
		if err != nil {
			t.Fatalf("Failed to publish record batch: %v", err)
		}

		if response.BaseOffset != 0 {
			t.Errorf("Expected base offset 0, got %d", response.BaseOffset)
		}

		if response.LastOffset != 2 {
			t.Errorf("Expected last offset 2, got %d", response.LastOffset)
		}

		// Verify high water mark
		hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition)
		if err != nil {
			t.Fatalf("Failed to get high water mark: %v", err)
		}

		if hwm != 3 {
			t.Errorf("Expected high water mark 3, got %d", hwm)
		}
	})

	t.Run("CreateAndUseSubscription", func(t *testing.T) {
		// Create subscription from earliest
		sub, err := integration.CreateSubscription(
			"e2e-test-sub",
			"test-namespace", "test-topic",
			partition,
			schema_pb.OffsetType_RESET_TO_EARLIEST,
			0,
		)
		if err != nil {
			t.Fatalf("Failed to create subscription: %v", err)
		}

		// Subscribe to records
		responses, err := integration.SubscribeRecords(sub, 2)
		if err != nil {
			t.Fatalf("Failed to subscribe to records: %v", err)
		}

		if len(responses) != 2 {
			t.Errorf("Expected 2 responses, got %d", len(responses))
		}

		// Check subscription advancement
		if sub.CurrentOffset != 2 {
			t.Errorf("Expected current offset 2, got %d", sub.CurrentOffset)
		}

		// Get subscription lag
		lag, err := sub.GetLag()
		if err != nil {
			t.Fatalf("Failed to get lag: %v", err)
		}

		if lag != 1 { // 3 (hwm) - 2 (current) = 1
			t.Errorf("Expected lag 1, got %d", lag)
		}
	})

	t.Run("OffsetSeekingAndRanges", func(t *testing.T) {
		// Create subscription at specific offset
		sub, err := integration.CreateSubscription(
			"seek-test-sub",
			"test-namespace", "test-topic",
			partition,
			schema_pb.OffsetType_EXACT_OFFSET,
			1,
		)
		if err != nil {
			t.Fatalf("Failed to create subscription at offset 1: %v", err)
		}

		// Verify starting position
		if sub.CurrentOffset != 1 {
			t.Errorf("Expected current offset 1, got %d", sub.CurrentOffset)
		}

		// Get offset range
		offsetRange, err := sub.GetOffsetRange(2)
		if err != nil {
			t.Fatalf("Failed to get offset range: %v", err)
		}

		if offsetRange.StartOffset != 1 {
			t.Errorf("Expected start offset 1, got %d", offsetRange.StartOffset)
		}

		if offsetRange.Count != 2 {
			t.Errorf("Expected count 2, got %d", offsetRange.Count)
		}

		// Seek to different offset
		err = sub.SeekToOffset(0)
		if err != nil {
			t.Fatalf("Failed to seek to offset 0: %v", err)
		}

		if sub.CurrentOffset != 0 {
			t.Errorf("Expected current offset 0 after seek, got %d", sub.CurrentOffset)
		}
	})

	t.Run("PartitionInformationAndMetrics", func(t *testing.T) {
		// Get partition offset info
		info, err := integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition)
		if err != nil {
			t.Fatalf("Failed to get partition offset info: %v", err)
		}

		if info.EarliestOffset != 0 {
			t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset)
		}

		if info.LatestOffset != 2 {
			t.Errorf("Expected latest offset 2, got %d", info.LatestOffset)
		}

		if info.HighWaterMark != 3 {
			t.Errorf("Expected high water mark 3, got %d", info.HighWaterMark)
		}

		if info.ActiveSubscriptions != 2 { // Two subscriptions created above
			t.Errorf("Expected 2 active subscriptions, got %d", info.ActiveSubscriptions)
		}

		// Get offset metrics
		metrics := integration.GetOffsetMetrics()
		if metrics.PartitionCount != 1 {
			t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount)
		}

		if metrics.ActiveSubscriptions != 2 {
			t.Errorf("Expected 2 active subscriptions in metrics, got %d", metrics.ActiveSubscriptions)
		}
	})
}

// TestOffsetPersistenceAcrossRestarts tests that offsets persist across system restarts
func TestOffsetPersistenceAcrossRestarts(t *testing.T) {
	// Create temporary database
	tmpFile, err := os.CreateTemp("", "persistence_test_*.db")
	if err != nil {
		t.Fatalf("Failed to create temp database: %v", err)
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())

	partition := &schema_pb.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	var lastOffset int64

	// First session: Create database and assign offsets
	{
		db, err := CreateDatabase(tmpFile.Name())
		if err != nil {
			t.Fatalf("Failed to create database: %v", err)
		}

		storage, err := NewSQLOffsetStorage(db)
		if err != nil {
			t.Fatalf("Failed to create SQL storage: %v", err)
		}

		integration := NewSMQOffsetIntegration(storage)

		// Publish some records
		records := []PublishRecordRequest{
			{Key: []byte("msg1"), Value: &schema_pb.RecordValue{}},
			{Key: []byte("msg2"), Value: &schema_pb.RecordValue{}},
			{Key: []byte("msg3"), Value: &schema_pb.RecordValue{}},
		}

		response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
		if err != nil {
			t.Fatalf("Failed to publish records: %v", err)
		}

		lastOffset = response.LastOffset

		// Close connections - Close integration first to trigger final checkpoint
		integration.Close()
		storage.Close()
		db.Close()
	}

	// Second session: Reopen database and verify persistence
	{
		db, err := CreateDatabase(tmpFile.Name())
		if err != nil {
			t.Fatalf("Failed to reopen database: %v", err)
		}
		defer db.Close()

		storage, err := NewSQLOffsetStorage(db)
		if err != nil {
			t.Fatalf("Failed to create SQL storage: %v", err)
		}
		defer storage.Close()

		integration := NewSMQOffsetIntegration(storage)

		// Verify high water mark persisted
		hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition)
		if err != nil {
			t.Fatalf("Failed to get high water mark after restart: %v", err)
		}

		if hwm != lastOffset+1 {
			t.Errorf("Expected high water mark %d after restart, got %d", lastOffset+1, hwm)
		}

		// Assign new offsets and verify continuity
		newResponse, err := integration.PublishRecord("test-namespace", "test-topic", partition, []byte("msg4"), &schema_pb.RecordValue{})
		if err != nil {
			t.Fatalf("Failed to publish new record after restart: %v", err)
		}

		expectedNextOffset := lastOffset + 1
		if newResponse.BaseOffset != expectedNextOffset {
			t.Errorf("Expected next offset %d after restart, got %d", expectedNextOffset, newResponse.BaseOffset)
		}
	}
}

// TestConcurrentOffsetOperations tests concurrent offset operations
func TestConcurrentOffsetOperations(t *testing.T) {
	// Create temporary database
	tmpFile, err := os.CreateTemp("", "concurrent_test_*.db")
	if err != nil {
		t.Fatalf("Failed to create temp database: %v", err)
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())

	db, err := CreateDatabase(tmpFile.Name())
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	storage, err := NewSQLOffsetStorage(db)
	if err != nil {
		t.Fatalf("Failed to create SQL storage: %v", err)
	}
	defer storage.Close()

	integration := NewSMQOffsetIntegration(storage)

	partition := &schema_pb.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	// Concurrent publishers
	const numPublishers = 5
	const recordsPerPublisher = 10

	done := make(chan bool, numPublishers)

	for i := 0; i < numPublishers; i++ {
		go func(publisherID int) {
			defer func() { done <- true }()

			for j := 0; j < recordsPerPublisher; j++ {
				key := fmt.Sprintf("publisher-%d-msg-%d", publisherID, j)
				_, err := integration.PublishRecord("test-namespace", "test-topic", partition, []byte(key), &schema_pb.RecordValue{})
				if err != nil {
					t.Errorf("Publisher %d failed to publish message %d: %v", publisherID, j, err)
					return
				}
			}
		}(i)
	}

	// Wait for all publishers to complete
	for i := 0; i < numPublishers; i++ {
		<-done
	}

	// Verify total records
	hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition)
	if err != nil {
		t.Fatalf("Failed to get high water mark: %v", err)
	}

	expectedTotal := int64(numPublishers * recordsPerPublisher)
	if hwm != expectedTotal {
		t.Errorf("Expected high water mark %d, got %d", expectedTotal, hwm)
	}

	// Verify no duplicate offsets
	info, err := integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition)
	if err != nil {
		t.Fatalf("Failed to get partition info: %v", err)
	}

	if info.RecordCount != expectedTotal {
		t.Errorf("Expected record count %d, got %d", expectedTotal, info.RecordCount)
	}
}

// TestOffsetValidationAndErrorHandling tests error conditions and validation
func TestOffsetValidationAndErrorHandling(t *testing.T) {
	// Create temporary database
	tmpFile, err := os.CreateTemp("", "validation_test_*.db")
	if err != nil {
		t.Fatalf("Failed to create temp database: %v", err)
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())

	db, err := CreateDatabase(tmpFile.Name())
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	storage, err := NewSQLOffsetStorage(db)
	if err != nil {
		t.Fatalf("Failed to create SQL storage: %v", err)
	}
	defer storage.Close()

	integration := NewSMQOffsetIntegration(storage)

	partition := &schema_pb.Partition{
		RingSize: 1024,
		RangeStart: 0,
		RangeStop: 31,
		UnixTimeNs: time.Now().UnixNano(),
	}

	t.Run("InvalidOffsetSubscription", func(t *testing.T) {
		// Try to create subscription with invalid offset
		_, err := integration.CreateSubscription(
			"invalid-sub",
			"test-namespace", "test-topic",
			partition,
			schema_pb.OffsetType_EXACT_OFFSET,
			100, // Beyond any existing data
		)
		if err == nil {
			t.Error("Expected error for subscription beyond high water mark")
		}
	})

	t.Run("NegativeOffsetValidation", func(t *testing.T) {
		// Try to create subscription with negative offset
		_, err := integration.CreateSubscription(
			"negative-sub",
			"test-namespace", "test-topic",
			partition,
			schema_pb.OffsetType_EXACT_OFFSET,
			-1,
		)
		if err == nil {
			t.Error("Expected error for negative offset")
		}
	})

	t.Run("DuplicateSubscriptionID", func(t *testing.T) {
		// Create first subscription
		_, err := integration.CreateSubscription(
			"duplicate-id",
			"test-namespace", "test-topic",
			partition,
			schema_pb.OffsetType_RESET_TO_EARLIEST,
			0,
		)
		if err != nil {
			t.Fatalf("Failed to create first subscription: %v", err)
		}

		// Try to create duplicate
		_, err = integration.CreateSubscription(
			"duplicate-id",
			"test-namespace", "test-topic",
			partition,
			schema_pb.OffsetType_RESET_TO_EARLIEST,
			0,
		)
		if err == nil {
			t.Error("Expected error for duplicate subscription ID")
		}
	})

	t.Run("OffsetRangeValidation", func(t *testing.T) {
		// Add some data first
		integration.PublishRecord("test-namespace", "test-topic", partition, []byte("test"), &schema_pb.RecordValue{})

		// Test invalid range validation
		err := integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 5, 10) // Beyond high water mark
		if err == nil {
			t.Error("Expected error for range beyond high water mark")
		}

		err = integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 10, 5) // End before start
		if err == nil {
			t.Error("Expected error for end offset before start offset")
		}

		err = integration.ValidateOffsetRange("test-namespace", "test-topic", partition, -1, 5) // Negative start
		if err == nil {
			t.Error("Expected error for negative start offset")
		}
	})
}
@@ -93,9 +93,3 @@ func (f *FilerOffsetStorage) getPartitionDir(namespace, topicName string, partit
	return fmt.Sprintf("%s/%s/%s/%s/%s", filer.TopicsDir, namespace, topicName, version, partitionRange)
}

// getPartitionKey generates a unique key for a partition
func (f *FilerOffsetStorage) getPartitionKey(partition *schema_pb.Partition) string {
	return fmt.Sprintf("ring:%d:range:%d-%d:time:%d",
		partition.RingSize, partition.RangeStart, partition.RangeStop, partition.UnixTimeNs)
}
@@ -1,544 +0,0 @@
package offset

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

func TestSMQOffsetIntegration_PublishRecord(t *testing.T) {
	storage := NewInMemoryOffsetStorage()
	integration := NewSMQOffsetIntegration(storage)
	partition := createTestPartition()

	// Publish a single record
	response, err := integration.PublishRecord(
		"test-namespace", "test-topic",
		partition,
		[]byte("test-key"),
		&schema_pb.RecordValue{},
	)

	if err != nil {
		t.Fatalf("Failed to publish record: %v", err)
	}

	if response.Error != "" {
		t.Errorf("Expected no error, got: %s", response.Error)
	}

	if response.BaseOffset != 0 {
		t.Errorf("Expected base offset 0, got %d", response.BaseOffset)
	}

	if response.LastOffset != 0 {
		t.Errorf("Expected last offset 0, got %d", response.LastOffset)
	}
}

func TestSMQOffsetIntegration_PublishRecordBatch(t *testing.T) {
	storage := NewInMemoryOffsetStorage()
	integration := NewSMQOffsetIntegration(storage)
	partition := createTestPartition()

	// Create batch of records
	records := []PublishRecordRequest{
		{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
		{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
		{Key: []byte("key3"), Value: &schema_pb.RecordValue{}},
	}

	// Publish batch
	response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
	if err != nil {
		t.Fatalf("Failed to publish record batch: %v", err)
	}

	if response.Error != "" {
		t.Errorf("Expected no error, got: %s", response.Error)
	}

	if response.BaseOffset != 0 {
		t.Errorf("Expected base offset 0, got %d", response.BaseOffset)
	}

	if response.LastOffset != 2 {
		t.Errorf("Expected last offset 2, got %d", response.LastOffset)
	}

	// Verify high water mark
	hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition)
	if err != nil {
		t.Fatalf("Failed to get high water mark: %v", err)
	}

	if hwm != 3 {
		t.Errorf("Expected high water mark 3, got %d", hwm)
	}
}

func TestSMQOffsetIntegration_EmptyBatch(t *testing.T) {
	storage := NewInMemoryOffsetStorage()
	integration := NewSMQOffsetIntegration(storage)
	partition := createTestPartition()

	// Publish empty batch
	response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, []PublishRecordRequest{})
	if err != nil {
		t.Fatalf("Failed to publish empty batch: %v", err)
	}

	if response.Error == "" {
		t.Error("Expected error for empty batch")
	}
}

func TestSMQOffsetIntegration_CreateSubscription(t *testing.T) {
	storage := NewInMemoryOffsetStorage()
	integration := NewSMQOffsetIntegration(storage)
	partition := createTestPartition()

	// Publish some records first
	records := []PublishRecordRequest{
		{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
		{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
	}
	integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)

	// Create subscription
	sub, err := integration.CreateSubscription(
		"test-sub",
		"test-namespace", "test-topic",
		partition,
		schema_pb.OffsetType_RESET_TO_EARLIEST,
		0,
	)

	if err != nil {
		t.Fatalf("Failed to create subscription: %v", err)
	}

	if sub.ID != "test-sub" {
		t.Errorf("Expected subscription ID 'test-sub', got %s", sub.ID)
	}

	if sub.StartOffset != 0 {
		t.Errorf("Expected start offset 0, got %d", sub.StartOffset)
	}
}

func TestSMQOffsetIntegration_SubscribeRecords(t *testing.T) {
	storage := NewInMemoryOffsetStorage()
	integration := NewSMQOffsetIntegration(storage)
	partition := createTestPartition()

	// Publish some records
	records := []PublishRecordRequest{
		{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
		{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
		{Key: []byte("key3"), Value: &schema_pb.RecordValue{}},
	}
	integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)

	// Create subscription
	sub, err := integration.CreateSubscription(
		"test-sub",
		"test-namespace", "test-topic",
		partition,
		schema_pb.OffsetType_RESET_TO_EARLIEST,
		0,
	)
	if err != nil {
		t.Fatalf("Failed to create subscription: %v", err)
	}

	// Subscribe to records
	responses, err := integration.SubscribeRecords(sub, 2)
	if err != nil {
		t.Fatalf("Failed to subscribe to records: %v", err)
	}

	if len(responses) != 2 {
		t.Errorf("Expected 2 responses, got %d", len(responses))
	}

	// Check offset progression
	if responses[0].Offset != 0 {
		t.Errorf("Expected first record offset 0, got %d", responses[0].Offset)
	}

	if responses[1].Offset != 1 {
		t.Errorf("Expected second record offset 1, got %d", responses[1].Offset)
	}

	// Check subscription advancement
	if sub.CurrentOffset != 2 {
		t.Errorf("Expected subscription current offset 2, got %d", sub.CurrentOffset)
	}
}

func TestSMQOffsetIntegration_SubscribeEmptyPartition(t *testing.T) {
	storage := NewInMemoryOffsetStorage()
	integration := NewSMQOffsetIntegration(storage)
	partition := createTestPartition()

	// Create subscription on empty partition
|
||||
sub, err := integration.CreateSubscription(
|
||||
"empty-sub",
|
||||
"test-namespace", "test-topic",
|
||||
partition,
|
||||
schema_pb.OffsetType_RESET_TO_EARLIEST,
|
||||
0,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Subscribe to records (should return empty)
|
||||
responses, err := integration.SubscribeRecords(sub, 10)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to subscribe to empty partition: %v", err)
|
||||
}
|
||||
|
||||
if len(responses) != 0 {
|
||||
t.Errorf("Expected 0 responses from empty partition, got %d", len(responses))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_SeekSubscription(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Publish records
|
||||
records := []PublishRecordRequest{
|
||||
{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key3"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key4"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key5"), Value: &schema_pb.RecordValue{}},
|
||||
}
|
||||
integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
|
||||
|
||||
// Create subscription
|
||||
sub, err := integration.CreateSubscription(
|
||||
"seek-sub",
|
||||
"test-namespace", "test-topic",
|
||||
partition,
|
||||
schema_pb.OffsetType_RESET_TO_EARLIEST,
|
||||
0,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Seek to offset 3
|
||||
err = integration.SeekSubscription("seek-sub", 3)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to seek subscription: %v", err)
|
||||
}
|
||||
|
||||
if sub.CurrentOffset != 3 {
|
||||
t.Errorf("Expected current offset 3 after seek, got %d", sub.CurrentOffset)
|
||||
}
|
||||
|
||||
// Subscribe from new position
|
||||
responses, err := integration.SubscribeRecords(sub, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to subscribe after seek: %v", err)
|
||||
}
|
||||
|
||||
if len(responses) != 2 {
|
||||
t.Errorf("Expected 2 responses after seek, got %d", len(responses))
|
||||
}
|
||||
|
||||
if responses[0].Offset != 3 {
|
||||
t.Errorf("Expected first record offset 3 after seek, got %d", responses[0].Offset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_GetSubscriptionLag(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Publish records
|
||||
records := []PublishRecordRequest{
|
||||
{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key3"), Value: &schema_pb.RecordValue{}},
|
||||
}
|
||||
integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
|
||||
|
||||
// Create subscription at offset 1
|
||||
sub, err := integration.CreateSubscription(
|
||||
"lag-sub",
|
||||
"test-namespace", "test-topic",
|
||||
partition,
|
||||
schema_pb.OffsetType_EXACT_OFFSET,
|
||||
1,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Get lag
|
||||
lag, err := integration.GetSubscriptionLag("lag-sub")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get subscription lag: %v", err)
|
||||
}
|
||||
|
||||
expectedLag := int64(3 - 1) // hwm - current
|
||||
if lag != expectedLag {
|
||||
t.Errorf("Expected lag %d, got %d", expectedLag, lag)
|
||||
}
|
||||
|
||||
// Advance subscription and check lag again
|
||||
integration.SubscribeRecords(sub, 1)
|
||||
|
||||
lag, err = integration.GetSubscriptionLag("lag-sub")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get lag after advance: %v", err)
|
||||
}
|
||||
|
||||
expectedLag = int64(3 - 2) // hwm - current
|
||||
if lag != expectedLag {
|
||||
t.Errorf("Expected lag %d after advance, got %d", expectedLag, lag)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_CloseSubscription(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Create subscription
|
||||
_, err := integration.CreateSubscription(
|
||||
"close-sub",
|
||||
"test-namespace", "test-topic",
|
||||
partition,
|
||||
schema_pb.OffsetType_RESET_TO_EARLIEST,
|
||||
0,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Close subscription
|
||||
err = integration.CloseSubscription("close-sub")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to close subscription: %v", err)
|
||||
}
|
||||
|
||||
// Try to get lag (should fail)
|
||||
_, err = integration.GetSubscriptionLag("close-sub")
|
||||
if err == nil {
|
||||
t.Error("Expected error when getting lag for closed subscription")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_ValidateOffsetRange(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Publish some records
|
||||
records := []PublishRecordRequest{
|
||||
{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key3"), Value: &schema_pb.RecordValue{}},
|
||||
}
|
||||
integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
|
||||
|
||||
// Test valid range
|
||||
err := integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 0, 2)
|
||||
if err != nil {
|
||||
t.Errorf("Valid range should not return error: %v", err)
|
||||
}
|
||||
|
||||
// Test invalid range (beyond hwm)
|
||||
err = integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 0, 5)
|
||||
if err == nil {
|
||||
t.Error("Expected error for range beyond high water mark")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_GetAvailableOffsetRange(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Test empty partition
|
||||
offsetRange, err := integration.GetAvailableOffsetRange("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get available range for empty partition: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.Count != 0 {
|
||||
t.Errorf("Expected empty range for empty partition, got count %d", offsetRange.Count)
|
||||
}
|
||||
|
||||
// Publish records
|
||||
records := []PublishRecordRequest{
|
||||
{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
|
||||
}
|
||||
integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
|
||||
|
||||
// Test with data
|
||||
offsetRange, err = integration.GetAvailableOffsetRange("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get available range: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.StartOffset != 0 {
|
||||
t.Errorf("Expected start offset 0, got %d", offsetRange.StartOffset)
|
||||
}
|
||||
|
||||
if offsetRange.EndOffset != 1 {
|
||||
t.Errorf("Expected end offset 1, got %d", offsetRange.EndOffset)
|
||||
}
|
||||
|
||||
if offsetRange.Count != 2 {
|
||||
t.Errorf("Expected count 2, got %d", offsetRange.Count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_GetOffsetMetrics(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Initial metrics
|
||||
metrics := integration.GetOffsetMetrics()
|
||||
if metrics.TotalOffsets != 0 {
|
||||
t.Errorf("Expected 0 total offsets initially, got %d", metrics.TotalOffsets)
|
||||
}
|
||||
|
||||
if metrics.ActiveSubscriptions != 0 {
|
||||
t.Errorf("Expected 0 active subscriptions initially, got %d", metrics.ActiveSubscriptions)
|
||||
}
|
||||
|
||||
// Publish records
|
||||
records := []PublishRecordRequest{
|
||||
{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
|
||||
}
|
||||
integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
|
||||
|
||||
// Create subscriptions
|
||||
integration.CreateSubscription("sub1", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
integration.CreateSubscription("sub2", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
|
||||
// Check updated metrics
|
||||
metrics = integration.GetOffsetMetrics()
|
||||
if metrics.TotalOffsets != 2 {
|
||||
t.Errorf("Expected 2 total offsets, got %d", metrics.TotalOffsets)
|
||||
}
|
||||
|
||||
if metrics.ActiveSubscriptions != 2 {
|
||||
t.Errorf("Expected 2 active subscriptions, got %d", metrics.ActiveSubscriptions)
|
||||
}
|
||||
|
||||
if metrics.PartitionCount != 1 {
|
||||
t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_GetOffsetInfo(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Test non-existent offset
|
||||
info, err := integration.GetOffsetInfo("test-namespace", "test-topic", partition, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get offset info: %v", err)
|
||||
}
|
||||
|
||||
if info.Exists {
|
||||
t.Error("Offset should not exist in empty partition")
|
||||
}
|
||||
|
||||
// Publish record
|
||||
integration.PublishRecord("test-namespace", "test-topic", partition, []byte("key1"), &schema_pb.RecordValue{})
|
||||
|
||||
// Test existing offset
|
||||
info, err = integration.GetOffsetInfo("test-namespace", "test-topic", partition, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get offset info for existing offset: %v", err)
|
||||
}
|
||||
|
||||
if !info.Exists {
|
||||
t.Error("Offset should exist after publishing")
|
||||
}
|
||||
|
||||
if info.Offset != 0 {
|
||||
t.Errorf("Expected offset 0, got %d", info.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSMQOffsetIntegration_GetPartitionOffsetInfo(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
integration := NewSMQOffsetIntegration(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Test empty partition
|
||||
info, err := integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get partition offset info: %v", err)
|
||||
}
|
||||
|
||||
if info.EarliestOffset != 0 {
|
||||
t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset)
|
||||
}
|
||||
|
||||
if info.LatestOffset != -1 {
|
||||
t.Errorf("Expected latest offset -1 for empty partition, got %d", info.LatestOffset)
|
||||
}
|
||||
|
||||
if info.HighWaterMark != 0 {
|
||||
t.Errorf("Expected high water mark 0, got %d", info.HighWaterMark)
|
||||
}
|
||||
|
||||
if info.RecordCount != 0 {
|
||||
t.Errorf("Expected record count 0, got %d", info.RecordCount)
|
||||
}
|
||||
|
||||
// Publish records
|
||||
records := []PublishRecordRequest{
|
||||
{Key: []byte("key1"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key2"), Value: &schema_pb.RecordValue{}},
|
||||
{Key: []byte("key3"), Value: &schema_pb.RecordValue{}},
|
||||
}
|
||||
integration.PublishRecordBatch("test-namespace", "test-topic", partition, records)
|
||||
|
||||
// Create subscription
|
||||
integration.CreateSubscription("test-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
|
||||
// Test with data
|
||||
info, err = integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get partition offset info with data: %v", err)
|
||||
}
|
||||
|
||||
if info.EarliestOffset != 0 {
|
||||
t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset)
|
||||
}
|
||||
|
||||
if info.LatestOffset != 2 {
|
||||
t.Errorf("Expected latest offset 2, got %d", info.LatestOffset)
|
||||
}
|
||||
|
||||
if info.HighWaterMark != 3 {
|
||||
t.Errorf("Expected high water mark 3, got %d", info.HighWaterMark)
|
||||
}
|
||||
|
||||
if info.RecordCount != 3 {
|
||||
t.Errorf("Expected record count 3, got %d", info.RecordCount)
|
||||
}
|
||||
|
||||
if info.ActiveSubscriptions != 1 {
|
||||
t.Errorf("Expected 1 active subscription, got %d", info.ActiveSubscriptions)
|
||||
}
|
||||
}
|
||||
@@ -338,13 +338,6 @@ type OffsetAssigner struct {
|
||||
registry *PartitionOffsetRegistry
|
||||
}
|
||||
|
||||
// NewOffsetAssigner creates a new offset assigner
|
||||
func NewOffsetAssigner(storage OffsetStorage) *OffsetAssigner {
|
||||
return &OffsetAssigner{
|
||||
registry: NewPartitionOffsetRegistry(storage),
|
||||
}
|
||||
}
|
||||
|
||||
// AssignSingleOffset assigns a single offset with timestamp
|
||||
func (a *OffsetAssigner) AssignSingleOffset(namespace, topicName string, partition *schema_pb.Partition) *AssignmentResult {
|
||||
offset, err := a.registry.AssignOffset(namespace, topicName, partition)
|
||||
|
||||
@@ -1,388 +0,0 @@
|
||||
package offset
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
|
||||
)
|
||||
|
||||
func createTestPartition() *schema_pb.Partition {
|
||||
return &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 0,
|
||||
RangeStop: 31,
|
||||
UnixTimeNs: time.Now().UnixNano(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManager_BasicAssignment(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
partition := createTestPartition()
|
||||
|
||||
manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create offset manager: %v", err)
|
||||
}
|
||||
|
||||
// Test sequential offset assignment
|
||||
for i := int64(0); i < 10; i++ {
|
||||
offset := manager.AssignOffset()
|
||||
if offset != i {
|
||||
t.Errorf("Expected offset %d, got %d", i, offset)
|
||||
}
|
||||
}
|
||||
|
||||
// Test high water mark
|
||||
hwm := manager.GetHighWaterMark()
|
||||
if hwm != 10 {
|
||||
t.Errorf("Expected high water mark 10, got %d", hwm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManager_BatchAssignment(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
partition := createTestPartition()
|
||||
|
||||
manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create offset manager: %v", err)
|
||||
}
|
||||
|
||||
// Assign batch of 5 offsets
|
||||
baseOffset, lastOffset := manager.AssignOffsets(5)
|
||||
if baseOffset != 0 {
|
||||
t.Errorf("Expected base offset 0, got %d", baseOffset)
|
||||
}
|
||||
if lastOffset != 4 {
|
||||
t.Errorf("Expected last offset 4, got %d", lastOffset)
|
||||
}
|
||||
|
||||
// Assign another batch
|
||||
baseOffset, lastOffset = manager.AssignOffsets(3)
|
||||
if baseOffset != 5 {
|
||||
t.Errorf("Expected base offset 5, got %d", baseOffset)
|
||||
}
|
||||
if lastOffset != 7 {
|
||||
t.Errorf("Expected last offset 7, got %d", lastOffset)
|
||||
}
|
||||
|
||||
// Check high water mark
|
||||
hwm := manager.GetHighWaterMark()
|
||||
if hwm != 8 {
|
||||
t.Errorf("Expected high water mark 8, got %d", hwm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManager_Recovery(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
partition := createTestPartition()
|
||||
|
||||
// Create manager and assign some offsets
|
||||
manager1, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create offset manager: %v", err)
|
||||
}
|
||||
|
||||
// Assign offsets and simulate records
|
||||
for i := 0; i < 150; i++ { // More than checkpoint interval
|
||||
offset := manager1.AssignOffset()
|
||||
storage.AddRecord("test-namespace", "test-topic", partition, offset)
|
||||
}
|
||||
|
||||
// Wait for checkpoint to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Create new manager (simulates restart)
|
||||
manager2, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create offset manager after recovery: %v", err)
|
||||
}
|
||||
|
||||
// Next offset should continue from checkpoint + 1
|
||||
// With checkpoint interval 100, checkpoint happens at offset 100
|
||||
// So recovery should start from 101, but we assigned 150 offsets (0-149)
|
||||
// The checkpoint should be at 100, so next offset should be 101
|
||||
// But since we have records up to 149, it should recover from storage scan
|
||||
nextOffset := manager2.AssignOffset()
|
||||
if nextOffset != 150 {
|
||||
t.Errorf("Expected next offset 150 after recovery, got %d", nextOffset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManager_RecoveryFromStorage(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
partition := createTestPartition()
|
||||
|
||||
// Simulate existing records in storage without checkpoint
|
||||
for i := int64(0); i < 50; i++ {
|
||||
storage.AddRecord("test-namespace", "test-topic", partition, i)
|
||||
}
|
||||
|
||||
// Create manager - should recover from storage scan
|
||||
manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create offset manager: %v", err)
|
||||
}
|
||||
|
||||
// Next offset should be 50
|
||||
nextOffset := manager.AssignOffset()
|
||||
if nextOffset != 50 {
|
||||
t.Errorf("Expected next offset 50 after storage recovery, got %d", nextOffset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionOffsetRegistry_MultiplePartitions(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
|
||||
// Create different partitions
|
||||
partition1 := &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 0,
|
||||
RangeStop: 31,
|
||||
UnixTimeNs: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
partition2 := &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 32,
|
||||
RangeStop: 63,
|
||||
UnixTimeNs: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
// Assign offsets to different partitions
|
||||
offset1, err := registry.AssignOffset("test-namespace", "test-topic", partition1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to assign offset to partition1: %v", err)
|
||||
}
|
||||
if offset1 != 0 {
|
||||
t.Errorf("Expected offset 0 for partition1, got %d", offset1)
|
||||
}
|
||||
|
||||
offset2, err := registry.AssignOffset("test-namespace", "test-topic", partition2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to assign offset to partition2: %v", err)
|
||||
}
|
||||
if offset2 != 0 {
|
||||
t.Errorf("Expected offset 0 for partition2, got %d", offset2)
|
||||
}
|
||||
|
||||
// Assign more offsets to partition1
|
||||
offset1_2, err := registry.AssignOffset("test-namespace", "test-topic", partition1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to assign second offset to partition1: %v", err)
|
||||
}
|
||||
if offset1_2 != 1 {
|
||||
t.Errorf("Expected offset 1 for partition1, got %d", offset1_2)
|
||||
}
|
||||
|
||||
// Partition2 should still be at 0 for next assignment
|
||||
offset2_2, err := registry.AssignOffset("test-namespace", "test-topic", partition2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to assign second offset to partition2: %v", err)
|
||||
}
|
||||
if offset2_2 != 1 {
|
||||
t.Errorf("Expected offset 1 for partition2, got %d", offset2_2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionOffsetRegistry_BatchAssignment(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign batch of offsets
|
||||
baseOffset, lastOffset, err := registry.AssignOffsets("test-namespace", "test-topic", partition, 10)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to assign batch offsets: %v", err)
|
||||
}
|
||||
|
||||
if baseOffset != 0 {
|
||||
t.Errorf("Expected base offset 0, got %d", baseOffset)
|
||||
}
|
||||
if lastOffset != 9 {
|
||||
t.Errorf("Expected last offset 9, got %d", lastOffset)
|
||||
}
|
||||
|
||||
// Get high water mark
|
||||
hwm, err := registry.GetHighWaterMark("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get high water mark: %v", err)
|
||||
}
|
||||
if hwm != 10 {
|
||||
t.Errorf("Expected high water mark 10, got %d", hwm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetAssigner_SingleAssignment(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
assigner := NewOffsetAssigner(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign single offset
|
||||
result := assigner.AssignSingleOffset("test-namespace", "test-topic", partition)
|
||||
if result.Error != nil {
|
||||
t.Fatalf("Failed to assign single offset: %v", result.Error)
|
||||
}
|
||||
|
||||
if result.Assignment == nil {
|
||||
t.Fatal("Assignment result is nil")
|
||||
}
|
||||
|
||||
if result.Assignment.Offset != 0 {
|
||||
t.Errorf("Expected offset 0, got %d", result.Assignment.Offset)
|
||||
}
|
||||
|
||||
if result.Assignment.Partition != partition {
|
||||
t.Error("Partition mismatch in assignment")
|
||||
}
|
||||
|
||||
if result.Assignment.Timestamp <= 0 {
|
||||
t.Error("Timestamp should be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetAssigner_BatchAssignment(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
assigner := NewOffsetAssigner(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign batch of offsets
|
||||
result := assigner.AssignBatchOffsets("test-namespace", "test-topic", partition, 5)
|
||||
if result.Error != nil {
|
||||
t.Fatalf("Failed to assign batch offsets: %v", result.Error)
|
||||
}
|
||||
|
||||
if result.Batch == nil {
|
||||
t.Fatal("Batch result is nil")
|
||||
}
|
||||
|
||||
if result.Batch.BaseOffset != 0 {
|
||||
t.Errorf("Expected base offset 0, got %d", result.Batch.BaseOffset)
|
||||
}
|
||||
|
||||
if result.Batch.LastOffset != 4 {
|
||||
t.Errorf("Expected last offset 4, got %d", result.Batch.LastOffset)
|
||||
}
|
||||
|
||||
if result.Batch.Count != 5 {
|
||||
t.Errorf("Expected count 5, got %d", result.Batch.Count)
|
||||
}
|
||||
|
||||
if result.Batch.Timestamp <= 0 {
|
||||
t.Error("Timestamp should be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetAssigner_HighWaterMark(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
assigner := NewOffsetAssigner(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Initially should be 0
|
||||
hwm, err := assigner.GetHighWaterMark("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get initial high water mark: %v", err)
|
||||
}
|
||||
if hwm != 0 {
|
||||
t.Errorf("Expected initial high water mark 0, got %d", hwm)
|
||||
}
|
||||
|
||||
// Assign some offsets
|
||||
assigner.AssignBatchOffsets("test-namespace", "test-topic", partition, 10)
|
||||
|
||||
// High water mark should be updated
|
||||
hwm, err = assigner.GetHighWaterMark("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get high water mark after assignment: %v", err)
|
||||
}
|
||||
if hwm != 10 {
|
||||
t.Errorf("Expected high water mark 10, got %d", hwm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPartitionKey(t *testing.T) {
|
||||
partition1 := &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 0,
|
||||
RangeStop: 31,
|
||||
UnixTimeNs: 1234567890,
|
||||
}
|
||||
|
||||
partition2 := &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 0,
|
||||
RangeStop: 31,
|
||||
UnixTimeNs: 1234567890,
|
||||
}
|
||||
|
||||
partition3 := &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 32,
|
||||
RangeStop: 63,
|
||||
UnixTimeNs: 1234567890,
|
||||
}
|
||||
|
||||
key1 := partitionKey(partition1)
|
||||
key2 := partitionKey(partition2)
|
||||
key3 := partitionKey(partition3)
|
||||
|
||||
// Same partitions should have same key
|
||||
if key1 != key2 {
|
||||
t.Errorf("Same partitions should have same key: %s vs %s", key1, key2)
|
||||
}
|
||||
|
||||
// Different partitions should have different keys
|
||||
if key1 == key3 {
|
||||
t.Errorf("Different partitions should have different keys: %s vs %s", key1, key3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConcurrentOffsetAssignment(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
partition := createTestPartition()
|
||||
|
||||
const numGoroutines = 10
|
||||
const offsetsPerGoroutine = 100
|
||||
|
||||
results := make(chan int64, numGoroutines*offsetsPerGoroutine)
|
||||
|
||||
// Start concurrent offset assignments
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
for j := 0; j < offsetsPerGoroutine; j++ {
|
||||
offset, err := registry.AssignOffset("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to assign offset: %v", err)
|
||||
return
|
||||
}
|
||||
results <- offset
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Collect all results
|
||||
offsets := make(map[int64]bool)
|
||||
for i := 0; i < numGoroutines*offsetsPerGoroutine; i++ {
|
||||
offset := <-results
|
||||
if offsets[offset] {
|
||||
t.Errorf("Duplicate offset assigned: %d", offset)
|
||||
}
|
||||
offsets[offset] = true
|
||||
}
|
||||
|
||||
// Verify we got all expected offsets
|
||||
expectedCount := numGoroutines * offsetsPerGoroutine
|
||||
if len(offsets) != expectedCount {
|
||||
t.Errorf("Expected %d unique offsets, got %d", expectedCount, len(offsets))
|
||||
}
|
||||
|
||||
// Verify offsets are in expected range
|
||||
for offset := range offsets {
|
||||
if offset < 0 || offset >= int64(expectedCount) {
|
||||
t.Errorf("Offset %d is out of expected range [0, %d)", offset, expectedCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,302 +0,0 @@
|
||||
package offset
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MigrationVersion represents a database migration version
|
||||
type MigrationVersion struct {
|
||||
Version int
|
||||
Description string
|
||||
SQL string
|
||||
}
|
||||
|
||||
// GetMigrations returns all available migrations for offset storage
|
||||
func GetMigrations() []MigrationVersion {
|
||||
return []MigrationVersion{
|
||||
{
|
||||
Version: 1,
|
||||
Description: "Create initial offset storage tables",
|
||||
SQL: `
|
||||
-- Partition offset checkpoints table
|
||||
-- TODO: Add _index as computed column when supported by database
|
||||
CREATE TABLE IF NOT EXISTS partition_offset_checkpoints (
|
||||
partition_key TEXT PRIMARY KEY,
|
||||
ring_size INTEGER NOT NULL,
|
||||
range_start INTEGER NOT NULL,
|
||||
range_stop INTEGER NOT NULL,
|
||||
unix_time_ns INTEGER NOT NULL,
|
||||
checkpoint_offset INTEGER NOT NULL,
|
||||
updated_at INTEGER NOT NULL
|
||||
);
|
||||
|
||||
-- Offset mappings table for detailed tracking
|
||||
-- TODO: Add _index as computed column when supported by database
|
||||
CREATE TABLE IF NOT EXISTS offset_mappings (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partition_key TEXT NOT NULL,
|
||||
kafka_offset INTEGER NOT NULL,
|
||||
smq_timestamp INTEGER NOT NULL,
|
||||
message_size INTEGER NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
UNIQUE(partition_key, kafka_offset)
|
||||
);
|
||||
|
||||
-- Schema migrations tracking table
|
||||
CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||
version INTEGER PRIMARY KEY,
|
||||
description TEXT NOT NULL,
|
||||
applied_at INTEGER NOT NULL
|
||||
);
|
||||
`,
|
||||
},
|
||||
{
|
||||
Version: 2,
|
||||
Description: "Add indexes for performance optimization",
|
||||
SQL: `
|
||||
-- Indexes for performance
|
||||
CREATE INDEX IF NOT EXISTS idx_partition_offset_checkpoints_partition
|
||||
ON partition_offset_checkpoints(partition_key);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_offset_mappings_partition_offset
|
||||
ON offset_mappings(partition_key, kafka_offset);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_offset_mappings_timestamp
|
||||
ON offset_mappings(partition_key, smq_timestamp);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_offset_mappings_created_at
|
||||
ON offset_mappings(created_at);
|
||||
`,
|
||||
},
|
||||
{
|
||||
Version: 3,
|
||||
Description: "Add partition metadata table for enhanced tracking",
|
||||
SQL: `
|
||||
-- Partition metadata table
|
||||
CREATE TABLE IF NOT EXISTS partition_metadata (
|
||||
partition_key TEXT PRIMARY KEY,
|
||||
ring_size INTEGER NOT NULL,
|
||||
range_start INTEGER NOT NULL,
|
||||
range_stop INTEGER NOT NULL,
|
||||
unix_time_ns INTEGER NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
last_activity_at INTEGER NOT NULL,
|
||||
record_count INTEGER DEFAULT 0,
|
||||
total_size INTEGER DEFAULT 0
|
||||
);
|
||||
|
||||
-- Index for partition metadata
|
||||
CREATE INDEX IF NOT EXISTS idx_partition_metadata_activity
|
||||
ON partition_metadata(last_activity_at);
|
||||
`,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// MigrationManager handles database schema migrations
|
||||
type MigrationManager struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// NewMigrationManager creates a new migration manager
|
||||
func NewMigrationManager(db *sql.DB) *MigrationManager {
|
||||
return &MigrationManager{db: db}
|
||||
}
|
||||
|
||||
// GetCurrentVersion returns the current schema version
|
||||
func (m *MigrationManager) GetCurrentVersion() (int, error) {
|
||||
// First, ensure the migrations table exists
|
||||
_, err := m.db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||
version INTEGER PRIMARY KEY,
|
||||
description TEXT NOT NULL,
|
||||
applied_at INTEGER NOT NULL
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to create migrations table: %w", err)
|
||||
}
|
||||
|
||||
var version sql.NullInt64
|
||||
err = m.db.QueryRow("SELECT MAX(version) FROM schema_migrations").Scan(&version)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get current version: %w", err)
|
||||
}
|
||||
|
||||
if !version.Valid {
|
||||
return 0, nil // No migrations applied yet
|
||||
}
|
||||
|
||||
return int(version.Int64), nil
|
||||
}
|
||||
|
||||
// ApplyMigrations applies all pending migrations
|
||||
func (m *MigrationManager) ApplyMigrations() error {
|
||||
currentVersion, err := m.GetCurrentVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current version: %w", err)
|
||||
}
|
||||
|
||||
migrations := GetMigrations()
|
||||
|
||||
for _, migration := range migrations {
|
||||
if migration.Version <= currentVersion {
|
||||
continue // Already applied
|
||||
}
|
||||
|
||||
fmt.Printf("Applying migration %d: %s\n", migration.Version, migration.Description)
|
||||
|
||||
// Begin transaction
|
||||
tx, err := m.db.Begin()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction for migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
// Execute migration SQL
|
||||
_, err = tx.Exec(migration.SQL)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
// Record migration as applied
|
||||
_, err = tx.Exec(
|
||||
"INSERT INTO schema_migrations (version, description, applied_at) VALUES (?, ?, ?)",
|
||||
migration.Version,
|
||||
migration.Description,
|
||||
getCurrentTimestamp(),
|
||||
)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("failed to record migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
// Commit transaction
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully applied migration %d\n", migration.Version)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RollbackMigration rolls back a specific migration (if supported)
|
||||
func (m *MigrationManager) RollbackMigration(version int) error {
|
||||
// TODO: Implement rollback functionality
|
||||
// ASSUMPTION: For now, rollbacks are not supported as they require careful planning
|
||||
return fmt.Errorf("migration rollbacks not implemented - manual intervention required")
|
||||
}
|
||||
|
||||
// GetAppliedMigrations returns a list of all applied migrations
|
||||
func (m *MigrationManager) GetAppliedMigrations() ([]AppliedMigration, error) {
|
||||
rows, err := m.db.Query(`
|
||||
SELECT version, description, applied_at
|
||||
FROM schema_migrations
|
||||
ORDER BY version
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query applied migrations: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var migrations []AppliedMigration
|
||||
for rows.Next() {
|
||||
var migration AppliedMigration
|
||||
err := rows.Scan(&migration.Version, &migration.Description, &migration.AppliedAt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to scan migration: %w", err)
|
||||
}
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// ValidateSchema validates that the database schema is up to date
|
||||
func (m *MigrationManager) ValidateSchema() error {
|
||||
currentVersion, err := m.GetCurrentVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current version: %w", err)
|
||||
}
|
||||
|
||||
migrations := GetMigrations()
|
||||
if len(migrations) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
latestVersion := migrations[len(migrations)-1].Version
|
||||
if currentVersion < latestVersion {
|
||||
return fmt.Errorf("schema is outdated: current version %d, latest version %d", currentVersion, latestVersion)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppliedMigration represents a migration that has been applied
|
||||
type AppliedMigration struct {
|
||||
Version int
|
||||
Description string
|
||||
AppliedAt int64
|
||||
}
|
||||
|
||||
// getCurrentTimestamp returns the current timestamp in nanoseconds
|
||||
func getCurrentTimestamp() int64 {
|
||||
return time.Now().UnixNano()
|
||||
}
|
||||
|
||||
// CreateDatabase creates and initializes a new offset storage database
|
||||
func CreateDatabase(dbPath string) (*sql.DB, error) {
|
||||
// TODO: Support different database types (PostgreSQL, MySQL, etc.)
|
||||
// ASSUMPTION: Using SQLite for now, can be extended for other databases
|
||||
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open database: %w", err)
|
||||
}
|
||||
|
||||
// Configure SQLite for better performance
|
||||
pragmas := []string{
|
||||
"PRAGMA journal_mode=WAL", // Write-Ahead Logging for better concurrency
|
||||
"PRAGMA synchronous=NORMAL", // Balance between safety and performance
|
||||
"PRAGMA cache_size=10000", // Increase cache size
|
||||
"PRAGMA foreign_keys=ON", // Enable foreign key constraints
|
||||
"PRAGMA temp_store=MEMORY", // Store temporary tables in memory
|
||||
}
|
||||
|
||||
for _, pragma := range pragmas {
|
||||
_, err := db.Exec(pragma)
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, fmt.Errorf("failed to set pragma %s: %w", pragma, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply migrations
|
||||
migrationManager := NewMigrationManager(db)
|
||||
err = migrationManager.ApplyMigrations()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, fmt.Errorf("failed to apply migrations: %w", err)
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// BackupDatabase creates a backup of the offset storage database
|
||||
func BackupDatabase(sourceDB *sql.DB, backupPath string) error {
|
||||
// TODO: Implement database backup functionality
|
||||
// ASSUMPTION: This would use database-specific backup mechanisms
|
||||
return fmt.Errorf("database backup not implemented yet")
|
||||
}
|
||||
|
||||
// RestoreDatabase restores a database from a backup
|
||||
func RestoreDatabase(backupPath, targetPath string) error {
|
||||
// TODO: Implement database restore functionality
|
||||
// ASSUMPTION: This would use database-specific restore mechanisms
|
||||
return fmt.Errorf("database restore not implemented yet")
|
||||
}
|
||||
@@ -1,394 +0,0 @@
|
||||
package offset
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
|
||||
)
|
||||
|
||||
// OffsetEntry represents a mapping between Kafka offset and SMQ timestamp
|
||||
type OffsetEntry struct {
|
||||
KafkaOffset int64
|
||||
SMQTimestamp int64
|
||||
MessageSize int32
|
||||
}
|
||||
|
||||
// SQLOffsetStorage implements OffsetStorage using SQL database with _index column
|
||||
type SQLOffsetStorage struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// NewSQLOffsetStorage creates a new SQL-based offset storage
|
||||
func NewSQLOffsetStorage(db *sql.DB) (*SQLOffsetStorage, error) {
|
||||
storage := &SQLOffsetStorage{db: db}
|
||||
|
||||
// Initialize database schema
|
||||
if err := storage.initializeSchema(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize schema: %w", err)
|
||||
}
|
||||
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
// initializeSchema creates the necessary tables for offset storage
|
||||
func (s *SQLOffsetStorage) initializeSchema() error {
|
||||
// TODO: Create offset storage tables with _index as hidden column
|
||||
// ASSUMPTION: Using SQLite-compatible syntax, may need adaptation for other databases
|
||||
|
||||
queries := []string{
|
||||
// Partition offset checkpoints table
|
||||
// TODO: Add _index as computed column when supported by database
|
||||
// ASSUMPTION: Using regular columns for now, _index concept preserved for future enhancement
|
||||
`CREATE TABLE IF NOT EXISTS partition_offset_checkpoints (
|
||||
partition_key TEXT PRIMARY KEY,
|
||||
ring_size INTEGER NOT NULL,
|
||||
range_start INTEGER NOT NULL,
|
||||
range_stop INTEGER NOT NULL,
|
||||
unix_time_ns INTEGER NOT NULL,
|
||||
checkpoint_offset INTEGER NOT NULL,
|
||||
updated_at INTEGER NOT NULL
|
||||
)`,
|
||||
|
||||
// Offset mappings table for detailed tracking
|
||||
// TODO: Add _index as computed column when supported by database
|
||||
`CREATE TABLE IF NOT EXISTS offset_mappings (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partition_key TEXT NOT NULL,
|
||||
kafka_offset INTEGER NOT NULL,
|
||||
smq_timestamp INTEGER NOT NULL,
|
||||
message_size INTEGER NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
UNIQUE(partition_key, kafka_offset)
|
||||
)`,
|
||||
|
||||
// Indexes for performance
|
||||
`CREATE INDEX IF NOT EXISTS idx_partition_offset_checkpoints_partition
|
||||
ON partition_offset_checkpoints(partition_key)`,
|
||||
|
||||
`CREATE INDEX IF NOT EXISTS idx_offset_mappings_partition_offset
|
||||
ON offset_mappings(partition_key, kafka_offset)`,
|
||||
|
||||
`CREATE INDEX IF NOT EXISTS idx_offset_mappings_timestamp
|
||||
ON offset_mappings(partition_key, smq_timestamp)`,
|
||||
}
|
||||
|
||||
for _, query := range queries {
|
||||
if _, err := s.db.Exec(query); err != nil {
|
||||
return fmt.Errorf("failed to execute schema query: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveCheckpoint saves the checkpoint for a partition
|
||||
func (s *SQLOffsetStorage) SaveCheckpoint(namespace, topicName string, partition *schema_pb.Partition, offset int64) error {
|
||||
// Use TopicPartitionKey to ensure each topic has isolated checkpoint storage
|
||||
partitionKey := TopicPartitionKey(namespace, topicName, partition)
|
||||
now := time.Now().UnixNano()
|
||||
|
||||
// TODO: Use UPSERT for better performance
|
||||
// ASSUMPTION: SQLite REPLACE syntax, may need adaptation for other databases
|
||||
query := `
|
||||
REPLACE INTO partition_offset_checkpoints
|
||||
(partition_key, ring_size, range_start, range_stop, unix_time_ns, checkpoint_offset, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
`
|
||||
|
||||
_, err := s.db.Exec(query,
|
||||
partitionKey,
|
||||
partition.RingSize,
|
||||
partition.RangeStart,
|
||||
partition.RangeStop,
|
||||
partition.UnixTimeNs,
|
||||
offset,
|
||||
now,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save checkpoint: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadCheckpoint loads the checkpoint for a partition
|
||||
func (s *SQLOffsetStorage) LoadCheckpoint(namespace, topicName string, partition *schema_pb.Partition) (int64, error) {
|
||||
// Use TopicPartitionKey to match SaveCheckpoint
|
||||
partitionKey := TopicPartitionKey(namespace, topicName, partition)
|
||||
|
||||
query := `
|
||||
SELECT checkpoint_offset
|
||||
FROM partition_offset_checkpoints
|
||||
WHERE partition_key = ?
|
||||
`
|
||||
|
||||
var checkpointOffset int64
|
||||
err := s.db.QueryRow(query, partitionKey).Scan(&checkpointOffset)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return -1, fmt.Errorf("no checkpoint found")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to load checkpoint: %w", err)
|
||||
}
|
||||
|
||||
return checkpointOffset, nil
|
||||
}
|
||||
|
||||
// GetHighestOffset finds the highest offset in storage for a partition
|
||||
func (s *SQLOffsetStorage) GetHighestOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) {
|
||||
// Use TopicPartitionKey to match SaveCheckpoint
|
||||
partitionKey := TopicPartitionKey(namespace, topicName, partition)
|
||||
|
||||
// TODO: Use _index column for efficient querying
|
||||
// ASSUMPTION: kafka_offset represents the sequential offset we're tracking
|
||||
query := `
|
||||
SELECT MAX(kafka_offset)
|
||||
FROM offset_mappings
|
||||
WHERE partition_key = ?
|
||||
`
|
||||
|
||||
var highestOffset sql.NullInt64
|
||||
err := s.db.QueryRow(query, partitionKey).Scan(&highestOffset)
|
||||
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to get highest offset: %w", err)
|
||||
}
|
||||
|
||||
if !highestOffset.Valid {
|
||||
return -1, fmt.Errorf("no records found")
|
||||
}
|
||||
|
||||
return highestOffset.Int64, nil
|
||||
}
|
||||
|
||||
// SaveOffsetMapping stores an offset mapping (extends OffsetStorage interface)
|
||||
func (s *SQLOffsetStorage) SaveOffsetMapping(partitionKey string, kafkaOffset, smqTimestamp int64, size int32) error {
|
||||
now := time.Now().UnixNano()
|
||||
|
||||
// TODO: Handle duplicate key conflicts gracefully
|
||||
// ASSUMPTION: Using INSERT OR REPLACE for conflict resolution
|
||||
query := `
|
||||
INSERT OR REPLACE INTO offset_mappings
|
||||
(partition_key, kafka_offset, smq_timestamp, message_size, created_at)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
`
|
||||
|
||||
_, err := s.db.Exec(query, partitionKey, kafkaOffset, smqTimestamp, size, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save offset mapping: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadOffsetMappings retrieves all offset mappings for a partition
|
||||
func (s *SQLOffsetStorage) LoadOffsetMappings(partitionKey string) ([]OffsetEntry, error) {
|
||||
// TODO: Add pagination for large result sets
|
||||
// ASSUMPTION: Loading all mappings for now, should be paginated in production
|
||||
query := `
|
||||
SELECT kafka_offset, smq_timestamp, message_size
|
||||
FROM offset_mappings
|
||||
WHERE partition_key = ?
|
||||
ORDER BY kafka_offset ASC
|
||||
`
|
||||
|
||||
rows, err := s.db.Query(query, partitionKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query offset mappings: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var entries []OffsetEntry
|
||||
for rows.Next() {
|
||||
var entry OffsetEntry
|
||||
err := rows.Scan(&entry.KafkaOffset, &entry.SMQTimestamp, &entry.MessageSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to scan offset entry: %w", err)
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating offset mappings: %w", err)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// GetOffsetMappingsByRange retrieves offset mappings within a specific range
|
||||
func (s *SQLOffsetStorage) GetOffsetMappingsByRange(partitionKey string, startOffset, endOffset int64) ([]OffsetEntry, error) {
|
||||
// TODO: Use _index column for efficient range queries
|
||||
query := `
|
||||
SELECT kafka_offset, smq_timestamp, message_size
|
||||
FROM offset_mappings
|
||||
WHERE partition_key = ? AND kafka_offset >= ? AND kafka_offset <= ?
|
||||
ORDER BY kafka_offset ASC
|
||||
`
|
||||
|
||||
rows, err := s.db.Query(query, partitionKey, startOffset, endOffset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query offset range: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var entries []OffsetEntry
|
||||
for rows.Next() {
|
||||
var entry OffsetEntry
|
||||
err := rows.Scan(&entry.KafkaOffset, &entry.SMQTimestamp, &entry.MessageSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to scan offset entry: %w", err)
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// GetPartitionStats returns statistics about a partition's offset usage
|
||||
func (s *SQLOffsetStorage) GetPartitionStats(partitionKey string) (*PartitionStats, error) {
|
||||
query := `
|
||||
SELECT
|
||||
COUNT(*) as record_count,
|
||||
MIN(kafka_offset) as earliest_offset,
|
||||
MAX(kafka_offset) as latest_offset,
|
||||
SUM(message_size) as total_size,
|
||||
MIN(created_at) as first_record_time,
|
||||
MAX(created_at) as last_record_time
|
||||
FROM offset_mappings
|
||||
WHERE partition_key = ?
|
||||
`
|
||||
|
||||
var stats PartitionStats
|
||||
var earliestOffset, latestOffset sql.NullInt64
|
||||
var totalSize sql.NullInt64
|
||||
var firstRecordTime, lastRecordTime sql.NullInt64
|
||||
|
||||
err := s.db.QueryRow(query, partitionKey).Scan(
|
||||
&stats.RecordCount,
|
||||
&earliestOffset,
|
||||
&latestOffset,
|
||||
&totalSize,
|
||||
&firstRecordTime,
|
||||
&lastRecordTime,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get partition stats: %w", err)
|
||||
}
|
||||
|
||||
stats.PartitionKey = partitionKey
|
||||
|
||||
if earliestOffset.Valid {
|
||||
stats.EarliestOffset = earliestOffset.Int64
|
||||
} else {
|
||||
stats.EarliestOffset = -1
|
||||
}
|
||||
|
||||
if latestOffset.Valid {
|
||||
stats.LatestOffset = latestOffset.Int64
|
||||
stats.HighWaterMark = latestOffset.Int64 + 1
|
||||
} else {
|
||||
stats.LatestOffset = -1
|
||||
stats.HighWaterMark = 0
|
||||
}
|
||||
|
||||
if firstRecordTime.Valid {
|
||||
stats.FirstRecordTime = firstRecordTime.Int64
|
||||
}
|
||||
|
||||
if lastRecordTime.Valid {
|
||||
stats.LastRecordTime = lastRecordTime.Int64
|
||||
}
|
||||
|
||||
if totalSize.Valid {
|
||||
stats.TotalSize = totalSize.Int64
|
||||
}
|
||||
|
||||
return &stats, nil
|
||||
}
|
||||
|
||||
// CleanupOldMappings removes offset mappings older than the specified time
|
||||
func (s *SQLOffsetStorage) CleanupOldMappings(olderThanNs int64) error {
|
||||
// TODO: Add configurable cleanup policies
|
||||
// ASSUMPTION: Simple time-based cleanup, could be enhanced with retention policies
|
||||
query := `
|
||||
DELETE FROM offset_mappings
|
||||
WHERE created_at < ?
|
||||
`
|
||||
|
||||
result, err := s.db.Exec(query, olderThanNs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to cleanup old mappings: %w", err)
|
||||
}
|
||||
|
||||
rowsAffected, _ := result.RowsAffected()
|
||||
if rowsAffected > 0 {
|
||||
// Log cleanup activity
|
||||
fmt.Printf("Cleaned up %d old offset mappings\n", rowsAffected)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the database connection
|
||||
func (s *SQLOffsetStorage) Close() error {
|
||||
if s.db != nil {
|
||||
return s.db.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PartitionStats provides statistics about a partition's offset usage
|
||||
type PartitionStats struct {
|
||||
PartitionKey string
|
||||
RecordCount int64
|
||||
EarliestOffset int64
|
||||
LatestOffset int64
|
||||
HighWaterMark int64
|
||||
TotalSize int64
|
||||
FirstRecordTime int64
|
||||
LastRecordTime int64
|
||||
}
|
||||
|
||||
// GetAllPartitions returns a list of all partitions with offset data
|
||||
func (s *SQLOffsetStorage) GetAllPartitions() ([]string, error) {
|
||||
query := `
|
||||
SELECT DISTINCT partition_key
|
||||
FROM offset_mappings
|
||||
ORDER BY partition_key
|
||||
`
|
||||
|
||||
rows, err := s.db.Query(query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get all partitions: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var partitions []string
|
||||
for rows.Next() {
|
||||
var partitionKey string
|
||||
if err := rows.Scan(&partitionKey); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan partition key: %w", err)
|
||||
}
|
||||
partitions = append(partitions, partitionKey)
|
||||
}
|
||||
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
// Vacuum performs database maintenance operations
|
||||
func (s *SQLOffsetStorage) Vacuum() error {
|
||||
// TODO: Add database-specific optimization commands
|
||||
// ASSUMPTION: SQLite VACUUM command, may need adaptation for other databases
|
||||
_, err := s.db.Exec("VACUUM")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to vacuum database: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,516 +0,0 @@
|
||||
package offset
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3" // SQLite driver
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
|
||||
)
|
||||
|
||||
func createTestDB(t *testing.T) *sql.DB {
|
||||
// Create temporary database file
|
||||
tmpFile, err := os.CreateTemp("", "offset_test_*.db")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp database file: %v", err)
|
||||
}
|
||||
tmpFile.Close()
|
||||
|
||||
// Clean up the file when test completes
|
||||
t.Cleanup(func() {
|
||||
os.Remove(tmpFile.Name())
|
||||
})
|
||||
|
||||
db, err := sql.Open("sqlite3", tmpFile.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database: %v", err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
db.Close()
|
||||
})
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
func createTestPartitionForSQL() *schema_pb.Partition {
|
||||
return &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 0,
|
||||
RangeStop: 31,
|
||||
UnixTimeNs: time.Now().UnixNano(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_InitializeSchema(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
// Verify tables were created
|
||||
tables := []string{
|
||||
"partition_offset_checkpoints",
|
||||
"offset_mappings",
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
var count int
|
||||
err := db.QueryRow("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", table).Scan(&count)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check table %s: %v", table, err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
t.Errorf("Table %s was not created", table)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_SaveLoadCheckpoint(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
|
||||
// Test saving checkpoint
|
||||
err = storage.SaveCheckpoint("test-namespace", "test-topic", partition, 100)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save checkpoint: %v", err)
|
||||
}
|
||||
|
||||
// Test loading checkpoint
|
||||
checkpoint, err := storage.LoadCheckpoint("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load checkpoint: %v", err)
|
||||
}
|
||||
|
||||
if checkpoint != 100 {
|
||||
t.Errorf("Expected checkpoint 100, got %d", checkpoint)
|
||||
}
|
||||
|
||||
// Test updating checkpoint
|
||||
err = storage.SaveCheckpoint("test-namespace", "test-topic", partition, 200)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update checkpoint: %v", err)
|
||||
}
|
||||
|
||||
checkpoint, err = storage.LoadCheckpoint("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load updated checkpoint: %v", err)
|
||||
}
|
||||
|
||||
if checkpoint != 200 {
|
||||
t.Errorf("Expected updated checkpoint 200, got %d", checkpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_LoadCheckpointNotFound(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
|
||||
// Test loading non-existent checkpoint
|
||||
_, err = storage.LoadCheckpoint("test-namespace", "test-topic", partition)
|
||||
if err == nil {
|
||||
t.Error("Expected error for non-existent checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_SaveLoadOffsetMappings(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := partitionKey(partition)
|
||||
|
||||
// Save multiple offset mappings
|
||||
mappings := []struct {
|
||||
offset int64
|
||||
timestamp int64
|
||||
size int32
|
||||
}{
|
||||
{0, 1000, 100},
|
||||
{1, 2000, 150},
|
||||
{2, 3000, 200},
|
||||
}
|
||||
|
||||
for _, mapping := range mappings {
|
||||
err := storage.SaveOffsetMapping(partitionKey, mapping.offset, mapping.timestamp, mapping.size)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save offset mapping: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Load offset mappings
|
||||
entries, err := storage.LoadOffsetMappings(partitionKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load offset mappings: %v", err)
|
||||
}
|
||||
|
||||
if len(entries) != len(mappings) {
|
||||
t.Errorf("Expected %d entries, got %d", len(mappings), len(entries))
|
||||
}
|
||||
|
||||
// Verify entries are sorted by offset
|
||||
for i, entry := range entries {
|
||||
expected := mappings[i]
|
||||
if entry.KafkaOffset != expected.offset {
|
||||
t.Errorf("Entry %d: expected offset %d, got %d", i, expected.offset, entry.KafkaOffset)
|
||||
}
|
||||
if entry.SMQTimestamp != expected.timestamp {
|
||||
t.Errorf("Entry %d: expected timestamp %d, got %d", i, expected.timestamp, entry.SMQTimestamp)
|
||||
}
|
||||
if entry.MessageSize != expected.size {
|
||||
t.Errorf("Entry %d: expected size %d, got %d", i, expected.size, entry.MessageSize)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_GetHighestOffset(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := TopicPartitionKey("test-namespace", "test-topic", partition)
|
||||
|
||||
// Test empty partition
|
||||
_, err = storage.GetHighestOffset("test-namespace", "test-topic", partition)
|
||||
if err == nil {
|
||||
t.Error("Expected error for empty partition")
|
||||
}
|
||||
|
||||
// Add some offset mappings
|
||||
offsets := []int64{5, 1, 3, 2, 4}
|
||||
for _, offset := range offsets {
|
||||
err := storage.SaveOffsetMapping(partitionKey, offset, offset*1000, 100)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save offset mapping: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get highest offset
|
||||
highest, err := storage.GetHighestOffset("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get highest offset: %v", err)
|
||||
}
|
||||
|
||||
if highest != 5 {
|
||||
t.Errorf("Expected highest offset 5, got %d", highest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_GetOffsetMappingsByRange(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := partitionKey(partition)
|
||||
|
||||
// Add offset mappings
|
||||
for i := int64(0); i < 10; i++ {
|
||||
err := storage.SaveOffsetMapping(partitionKey, i, i*1000, 100)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save offset mapping: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get range of offsets
|
||||
entries, err := storage.GetOffsetMappingsByRange(partitionKey, 3, 7)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get offset range: %v", err)
|
||||
}
|
||||
|
||||
expectedCount := 5 // offsets 3, 4, 5, 6, 7
|
||||
if len(entries) != expectedCount {
|
||||
t.Errorf("Expected %d entries, got %d", expectedCount, len(entries))
|
||||
}
|
||||
|
||||
// Verify range
|
||||
for i, entry := range entries {
|
||||
expectedOffset := int64(3 + i)
|
||||
if entry.KafkaOffset != expectedOffset {
|
||||
t.Errorf("Entry %d: expected offset %d, got %d", i, expectedOffset, entry.KafkaOffset)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_GetPartitionStats(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := partitionKey(partition)
|
||||
|
||||
// Test empty partition stats
|
||||
stats, err := storage.GetPartitionStats(partitionKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get empty partition stats: %v", err)
|
||||
}
|
||||
|
||||
if stats.RecordCount != 0 {
|
||||
t.Errorf("Expected record count 0, got %d", stats.RecordCount)
|
||||
}
|
||||
|
||||
if stats.EarliestOffset != -1 {
|
||||
t.Errorf("Expected earliest offset -1, got %d", stats.EarliestOffset)
|
||||
}
|
||||
|
||||
// Add some data
|
||||
sizes := []int32{100, 150, 200}
|
||||
for i, size := range sizes {
|
||||
err := storage.SaveOffsetMapping(partitionKey, int64(i), int64(i*1000), size)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save offset mapping: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get stats with data
|
||||
stats, err = storage.GetPartitionStats(partitionKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get partition stats: %v", err)
|
||||
}
|
||||
|
||||
if stats.RecordCount != 3 {
|
||||
t.Errorf("Expected record count 3, got %d", stats.RecordCount)
|
||||
}
|
||||
|
||||
if stats.EarliestOffset != 0 {
|
||||
t.Errorf("Expected earliest offset 0, got %d", stats.EarliestOffset)
|
||||
}
|
||||
|
||||
if stats.LatestOffset != 2 {
|
||||
t.Errorf("Expected latest offset 2, got %d", stats.LatestOffset)
|
||||
}
|
||||
|
||||
if stats.HighWaterMark != 3 {
|
||||
t.Errorf("Expected high water mark 3, got %d", stats.HighWaterMark)
|
||||
}
|
||||
|
||||
expectedTotalSize := int64(100 + 150 + 200)
|
||||
if stats.TotalSize != expectedTotalSize {
|
||||
t.Errorf("Expected total size %d, got %d", expectedTotalSize, stats.TotalSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_GetAllPartitions(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
// Test empty database
|
||||
partitions, err := storage.GetAllPartitions()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get all partitions: %v", err)
|
||||
}
|
||||
|
||||
if len(partitions) != 0 {
|
||||
t.Errorf("Expected 0 partitions, got %d", len(partitions))
|
||||
}
|
||||
|
||||
// Add data for multiple partitions
|
||||
partition1 := createTestPartitionForSQL()
|
||||
partition2 := &schema_pb.Partition{
|
||||
RingSize: 1024,
|
||||
RangeStart: 32,
|
||||
RangeStop: 63,
|
||||
UnixTimeNs: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
partitionKey1 := partitionKey(partition1)
|
||||
partitionKey2 := partitionKey(partition2)
|
||||
|
||||
storage.SaveOffsetMapping(partitionKey1, 0, 1000, 100)
|
||||
storage.SaveOffsetMapping(partitionKey2, 0, 2000, 150)
|
||||
|
||||
// Get all partitions
|
||||
partitions, err = storage.GetAllPartitions()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get all partitions: %v", err)
|
||||
}
|
||||
|
||||
if len(partitions) != 2 {
|
||||
t.Errorf("Expected 2 partitions, got %d", len(partitions))
|
||||
}
|
||||
|
||||
// Verify partition keys are present
|
||||
partitionMap := make(map[string]bool)
|
||||
for _, p := range partitions {
|
||||
partitionMap[p] = true
|
||||
}
|
||||
|
||||
if !partitionMap[partitionKey1] {
|
||||
t.Errorf("Partition key %s not found", partitionKey1)
|
||||
}
|
||||
|
||||
if !partitionMap[partitionKey2] {
|
||||
t.Errorf("Partition key %s not found", partitionKey2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_CleanupOldMappings(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := partitionKey(partition)
|
||||
|
||||
// Add mappings with different timestamps
|
||||
now := time.Now().UnixNano()
|
||||
|
||||
// Add old mapping by directly inserting with old timestamp
|
||||
oldTime := now - (24 * time.Hour).Nanoseconds() // 24 hours ago
|
||||
_, err = db.Exec(`
|
||||
INSERT INTO offset_mappings
|
||||
(partition_key, kafka_offset, smq_timestamp, message_size, created_at)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
`, partitionKey, 0, oldTime, 100, oldTime)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to insert old mapping: %v", err)
|
||||
}
|
||||
|
||||
// Add recent mapping
|
||||
storage.SaveOffsetMapping(partitionKey, 1, now, 150)
|
||||
|
||||
// Verify both mappings exist
|
||||
entries, err := storage.LoadOffsetMappings(partitionKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load mappings: %v", err)
|
||||
}
|
||||
|
||||
if len(entries) != 2 {
|
||||
t.Errorf("Expected 2 mappings before cleanup, got %d", len(entries))
|
||||
}
|
||||
|
||||
// Cleanup old mappings (older than 12 hours)
|
||||
cutoffTime := now - (12 * time.Hour).Nanoseconds()
|
||||
err = storage.CleanupOldMappings(cutoffTime)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to cleanup old mappings: %v", err)
|
||||
}
|
||||
|
||||
// Verify only recent mapping remains
|
||||
entries, err = storage.LoadOffsetMappings(partitionKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load mappings after cleanup: %v", err)
|
||||
}
|
||||
|
||||
if len(entries) != 1 {
|
||||
t.Errorf("Expected 1 mapping after cleanup, got %d", len(entries))
|
||||
}
|
||||
|
||||
if entries[0].KafkaOffset != 1 {
|
||||
t.Errorf("Expected remaining mapping offset 1, got %d", entries[0].KafkaOffset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_Vacuum(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
// Vacuum should not fail on empty database
|
||||
err = storage.Vacuum()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to vacuum database: %v", err)
|
||||
}
|
||||
|
||||
// Add some data and vacuum again
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := partitionKey(partition)
|
||||
storage.SaveOffsetMapping(partitionKey, 0, 1000, 100)
|
||||
|
||||
err = storage.Vacuum()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to vacuum database with data: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSQLOffsetStorage_ConcurrentAccess(t *testing.T) {
|
||||
db := createTestDB(t)
|
||||
storage, err := NewSQLOffsetStorage(db)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create SQL storage: %v", err)
|
||||
}
|
||||
defer storage.Close()
|
||||
|
||||
partition := createTestPartitionForSQL()
|
||||
partitionKey := partitionKey(partition)
|
||||
|
||||
// Test concurrent writes
|
||||
const numGoroutines = 10
|
||||
const offsetsPerGoroutine = 10
|
||||
|
||||
done := make(chan bool, numGoroutines)
|
||||
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func(goroutineID int) {
|
||||
defer func() { done <- true }()
|
||||
|
||||
for j := 0; j < offsetsPerGoroutine; j++ {
|
||||
offset := int64(goroutineID*offsetsPerGoroutine + j)
|
||||
err := storage.SaveOffsetMapping(partitionKey, offset, offset*1000, 100)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to save offset mapping %d: %v", offset, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all goroutines to complete
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
// Verify all mappings were saved
|
||||
entries, err := storage.LoadOffsetMappings(partitionKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load mappings: %v", err)
|
||||
}
|
||||
|
||||
expectedCount := numGoroutines * offsetsPerGoroutine
|
||||
if len(entries) != expectedCount {
|
||||
t.Errorf("Expected %d mappings, got %d", expectedCount, len(entries))
|
||||
}
|
||||
}
|
||||
@@ -1,457 +0,0 @@
|
||||
package offset
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
|
||||
)
|
||||
|
||||
func TestOffsetSubscriber_CreateSubscription(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign some offsets first
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 10)
|
||||
|
||||
// Test EXACT_OFFSET subscription
|
||||
sub, err := subscriber.CreateSubscription("test-sub-1", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create EXACT_OFFSET subscription: %v", err)
|
||||
}
|
||||
|
||||
if sub.StartOffset != 5 {
|
||||
t.Errorf("Expected start offset 5, got %d", sub.StartOffset)
|
||||
}
|
||||
if sub.CurrentOffset != 5 {
|
||||
t.Errorf("Expected current offset 5, got %d", sub.CurrentOffset)
|
||||
}
|
||||
|
||||
// Test RESET_TO_LATEST subscription
|
||||
sub2, err := subscriber.CreateSubscription("test-sub-2", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_LATEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create RESET_TO_LATEST subscription: %v", err)
|
||||
}
|
||||
|
||||
if sub2.StartOffset != 10 { // Should be at high water mark
|
||||
t.Errorf("Expected start offset 10, got %d", sub2.StartOffset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscriber_InvalidSubscription(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign some offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 5)
|
||||
|
||||
// Test invalid offset (beyond high water mark)
|
||||
_, err := subscriber.CreateSubscription("invalid-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 10)
|
||||
if err == nil {
|
||||
t.Error("Expected error for offset beyond high water mark")
|
||||
}
|
||||
|
||||
// Test negative offset
|
||||
_, err = subscriber.CreateSubscription("invalid-sub-2", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, -1)
|
||||
if err == nil {
|
||||
t.Error("Expected error for negative offset")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscriber_DuplicateSubscription(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Create first subscription
|
||||
_, err := subscriber.CreateSubscription("duplicate-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create first subscription: %v", err)
|
||||
}
|
||||
|
||||
// Try to create duplicate
|
||||
_, err = subscriber.CreateSubscription("duplicate-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
if err == nil {
|
||||
t.Error("Expected error for duplicate subscription ID")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_SeekToOffset(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 20)
|
||||
|
||||
// Create subscription
|
||||
sub, err := subscriber.CreateSubscription("seek-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Test valid seek
|
||||
err = sub.SeekToOffset(10)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to seek to offset 10: %v", err)
|
||||
}
|
||||
|
||||
if sub.CurrentOffset != 10 {
|
||||
t.Errorf("Expected current offset 10, got %d", sub.CurrentOffset)
|
||||
}
|
||||
|
||||
// Test invalid seek (beyond high water mark)
|
||||
err = sub.SeekToOffset(25)
|
||||
if err == nil {
|
||||
t.Error("Expected error for seek beyond high water mark")
|
||||
}
|
||||
|
||||
// Test negative seek
|
||||
err = sub.SeekToOffset(-1)
|
||||
if err == nil {
|
||||
t.Error("Expected error for negative seek offset")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_AdvanceOffset(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Create subscription
|
||||
sub, err := subscriber.CreateSubscription("advance-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Test single advance
|
||||
initialOffset := sub.GetNextOffset()
|
||||
sub.AdvanceOffset()
|
||||
|
||||
if sub.GetNextOffset() != initialOffset+1 {
|
||||
t.Errorf("Expected offset %d, got %d", initialOffset+1, sub.GetNextOffset())
|
||||
}
|
||||
|
||||
// Test batch advance
|
||||
sub.AdvanceOffsetBy(5)
|
||||
|
||||
if sub.GetNextOffset() != initialOffset+6 {
|
||||
t.Errorf("Expected offset %d, got %d", initialOffset+6, sub.GetNextOffset())
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_GetLag(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 15)
|
||||
|
||||
// Create subscription at offset 5
|
||||
sub, err := subscriber.CreateSubscription("lag-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Check initial lag
|
||||
lag, err := sub.GetLag()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get lag: %v", err)
|
||||
}
|
||||
|
||||
expectedLag := int64(15 - 5) // hwm - current
|
||||
if lag != expectedLag {
|
||||
t.Errorf("Expected lag %d, got %d", expectedLag, lag)
|
||||
}
|
||||
|
||||
// Advance and check lag again
|
||||
sub.AdvanceOffsetBy(3)
|
||||
|
||||
lag, err = sub.GetLag()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get lag after advance: %v", err)
|
||||
}
|
||||
|
||||
expectedLag = int64(15 - 8) // hwm - current
|
||||
if lag != expectedLag {
|
||||
t.Errorf("Expected lag %d after advance, got %d", expectedLag, lag)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_IsAtEnd(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 10)
|
||||
|
||||
// Create subscription at end
|
||||
sub, err := subscriber.CreateSubscription("end-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_LATEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Should be at end
|
||||
atEnd, err := sub.IsAtEnd()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check if at end: %v", err)
|
||||
}
|
||||
|
||||
if !atEnd {
|
||||
t.Error("Expected subscription to be at end")
|
||||
}
|
||||
|
||||
// Seek to middle and check again
|
||||
sub.SeekToOffset(5)
|
||||
|
||||
atEnd, err = sub.IsAtEnd()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check if at end after seek: %v", err)
|
||||
}
|
||||
|
||||
if atEnd {
|
||||
t.Error("Expected subscription not to be at end after seek")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_GetOffsetRange(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 20)
|
||||
|
||||
// Create subscription
|
||||
sub, err := subscriber.CreateSubscription("range-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Test normal range
|
||||
offsetRange, err := sub.GetOffsetRange(10)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get offset range: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.StartOffset != 5 {
|
||||
t.Errorf("Expected start offset 5, got %d", offsetRange.StartOffset)
|
||||
}
|
||||
if offsetRange.EndOffset != 14 {
|
||||
t.Errorf("Expected end offset 14, got %d", offsetRange.EndOffset)
|
||||
}
|
||||
if offsetRange.Count != 10 {
|
||||
t.Errorf("Expected count 10, got %d", offsetRange.Count)
|
||||
}
|
||||
|
||||
// Test range that exceeds high water mark
|
||||
sub.SeekToOffset(15)
|
||||
offsetRange, err = sub.GetOffsetRange(10)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get offset range near end: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.StartOffset != 15 {
|
||||
t.Errorf("Expected start offset 15, got %d", offsetRange.StartOffset)
|
||||
}
|
||||
if offsetRange.EndOffset != 19 { // Should be capped at hwm-1
|
||||
t.Errorf("Expected end offset 19, got %d", offsetRange.EndOffset)
|
||||
}
|
||||
if offsetRange.Count != 5 {
|
||||
t.Errorf("Expected count 5, got %d", offsetRange.Count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_EmptyRange(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 10)
|
||||
|
||||
// Create subscription at end
|
||||
sub, err := subscriber.CreateSubscription("empty-range-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_LATEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Request range when at end
|
||||
offsetRange, err := sub.GetOffsetRange(5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get offset range at end: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.Count != 0 {
|
||||
t.Errorf("Expected empty range (count 0), got count %d", offsetRange.Count)
|
||||
}
|
||||
|
||||
if offsetRange.StartOffset != 10 {
|
||||
t.Errorf("Expected start offset 10, got %d", offsetRange.StartOffset)
|
||||
}
|
||||
|
||||
if offsetRange.EndOffset != 9 { // Empty range: end < start
|
||||
t.Errorf("Expected end offset 9 (empty range), got %d", offsetRange.EndOffset)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSeeker_ValidateOffsetRange(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
seeker := NewOffsetSeeker(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Assign offsets
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 15)
|
||||
|
||||
// Test valid range
|
||||
err := seeker.ValidateOffsetRange("test-namespace", "test-topic", partition, 5, 10)
|
||||
if err != nil {
|
||||
t.Errorf("Valid range should not return error: %v", err)
|
||||
}
|
||||
|
||||
// Test invalid ranges
|
||||
testCases := []struct {
|
||||
name string
|
||||
startOffset int64
|
||||
endOffset int64
|
||||
expectError bool
|
||||
}{
|
||||
{"negative start", -1, 5, true},
|
||||
{"end before start", 10, 5, true},
|
||||
{"start beyond hwm", 20, 25, true},
|
||||
{"valid range", 0, 14, false},
|
||||
{"single offset", 5, 5, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := seeker.ValidateOffsetRange("test-namespace", "test-topic", partition, tc.startOffset, tc.endOffset)
|
||||
if tc.expectError && err == nil {
|
||||
t.Error("Expected error but got none")
|
||||
}
|
||||
if !tc.expectError && err != nil {
|
||||
t.Errorf("Expected no error but got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSeeker_GetAvailableOffsetRange(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
seeker := NewOffsetSeeker(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Test empty partition
|
||||
offsetRange, err := seeker.GetAvailableOffsetRange("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get available range for empty partition: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.Count != 0 {
|
||||
t.Errorf("Expected empty range for empty partition, got count %d", offsetRange.Count)
|
||||
}
|
||||
|
||||
// Assign offsets and test again
|
||||
registry.AssignOffsets("test-namespace", "test-topic", partition, 25)
|
||||
|
||||
offsetRange, err = seeker.GetAvailableOffsetRange("test-namespace", "test-topic", partition)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get available range: %v", err)
|
||||
}
|
||||
|
||||
if offsetRange.StartOffset != 0 {
|
||||
t.Errorf("Expected start offset 0, got %d", offsetRange.StartOffset)
|
||||
}
|
||||
if offsetRange.EndOffset != 24 {
|
||||
t.Errorf("Expected end offset 24, got %d", offsetRange.EndOffset)
|
||||
}
|
||||
if offsetRange.Count != 25 {
|
||||
t.Errorf("Expected count 25, got %d", offsetRange.Count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscriber_CloseSubscription(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Create subscription
|
||||
sub, err := subscriber.CreateSubscription("close-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
// Verify subscription exists
|
||||
_, err = subscriber.GetSubscription("close-test")
|
||||
if err != nil {
|
||||
t.Fatalf("Subscription should exist: %v", err)
|
||||
}
|
||||
|
||||
// Close subscription
|
||||
err = subscriber.CloseSubscription("close-test")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to close subscription: %v", err)
|
||||
}
|
||||
|
||||
// Verify subscription is gone
|
||||
_, err = subscriber.GetSubscription("close-test")
|
||||
if err == nil {
|
||||
t.Error("Subscription should not exist after close")
|
||||
}
|
||||
|
||||
// Verify subscription is marked inactive
|
||||
if sub.IsActive {
|
||||
t.Error("Subscription should be marked inactive after close")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOffsetSubscription_InactiveOperations(t *testing.T) {
|
||||
storage := NewInMemoryOffsetStorage()
|
||||
registry := NewPartitionOffsetRegistry(storage)
|
||||
subscriber := NewOffsetSubscriber(registry)
|
||||
partition := createTestPartition()
|
||||
|
||||
// Create and close subscription
|
||||
sub, err := subscriber.CreateSubscription("inactive-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create subscription: %v", err)
|
||||
}
|
||||
|
||||
subscriber.CloseSubscription("inactive-test")
|
||||
|
||||
// Test operations on inactive subscription
|
||||
err = sub.SeekToOffset(5)
|
||||
if err == nil {
|
||||
t.Error("Expected error for seek on inactive subscription")
|
||||
}
|
||||
|
||||
_, err = sub.GetLag()
|
||||
if err == nil {
|
||||
t.Error("Expected error for GetLag on inactive subscription")
|
||||
}
|
||||
|
||||
_, err = sub.IsAtEnd()
|
||||
if err == nil {
|
||||
t.Error("Expected error for IsAtEnd on inactive subscription")
|
||||
}
|
||||
|
||||
_, err = sub.GetOffsetRange(10)
|
||||
if err == nil {
|
||||
t.Error("Expected error for GetOffsetRange on inactive subscription")
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,6 @@
|
||||
package pub_balancer
|
||||
|
||||
import (
|
||||
"math/rand/v2"
|
||||
"sort"
|
||||
|
||||
cmap "github.com/orcaman/concurrent-map/v2"
|
||||
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
|
||||
"modernc.org/mathutil"
|
||||
)
|
||||
import ()
|
||||
|
||||
func (balancer *PubBalancer) RepairTopics() []BalanceAction {
|
||||
action := BalanceTopicPartitionOnBrokers(balancer.Brokers)
|
||||
@@ -17,107 +10,3 @@ func (balancer *PubBalancer) RepairTopics() []BalanceAction {
|
||||
type TopicPartitionInfo struct {
|
||||
Broker string
|
||||
}
|
||||
|
||||
// RepairMissingTopicPartitions check the stats of all brokers,
|
||||
// and repair the missing topic partitions on the brokers.
|
||||
func RepairMissingTopicPartitions(brokers cmap.ConcurrentMap[string, *BrokerStats]) (actions []BalanceAction) {
|
||||
|
||||
// find all topic partitions
|
||||
topicToTopicPartitions := make(map[topic.Topic]map[topic.Partition]*TopicPartitionInfo)
|
||||
for brokerStatsItem := range brokers.IterBuffered() {
|
||||
broker, brokerStats := brokerStatsItem.Key, brokerStatsItem.Val
|
||||
for topicPartitionStatsItem := range brokerStats.TopicPartitionStats.IterBuffered() {
|
||||
topicPartitionStat := topicPartitionStatsItem.Val
|
||||
topicPartitionToInfo, found := topicToTopicPartitions[topicPartitionStat.Topic]
|
||||
if !found {
|
||||
topicPartitionToInfo = make(map[topic.Partition]*TopicPartitionInfo)
|
||||
topicToTopicPartitions[topicPartitionStat.Topic] = topicPartitionToInfo
|
||||
}
|
||||
tpi, found := topicPartitionToInfo[topicPartitionStat.Partition]
|
||||
if !found {
|
||||
tpi = &TopicPartitionInfo{}
|
||||
topicPartitionToInfo[topicPartitionStat.Partition] = tpi
|
||||
}
|
||||
tpi.Broker = broker
|
||||
}
|
||||
}
|
||||
|
||||
// collect all brokers as candidates
|
||||
candidates := make([]string, 0, brokers.Count())
|
||||
for brokerStatsItem := range brokers.IterBuffered() {
|
||||
candidates = append(candidates, brokerStatsItem.Key)
|
||||
}
|
||||
|
||||
// find the missing topic partitions
|
||||
for t, topicPartitionToInfo := range topicToTopicPartitions {
|
||||
missingPartitions := EachTopicRepairMissingTopicPartitions(t, topicPartitionToInfo)
|
||||
for _, partition := range missingPartitions {
|
||||
actions = append(actions, BalanceActionCreate{
|
||||
TopicPartition: topic.TopicPartition{
|
||||
Topic: t,
|
||||
Partition: partition,
|
||||
},
|
||||
TargetBroker: candidates[rand.IntN(len(candidates))],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return actions
|
||||
}
|
||||
|
||||
func EachTopicRepairMissingTopicPartitions(t topic.Topic, info map[topic.Partition]*TopicPartitionInfo) (missingPartitions []topic.Partition) {
|
||||
|
||||
// find the missing topic partitions
|
||||
var partitions []topic.Partition
|
||||
for partition := range info {
|
||||
partitions = append(partitions, partition)
|
||||
}
|
||||
return findMissingPartitions(partitions, MaxPartitionCount)
|
||||
}
|
||||
|
||||
// findMissingPartitions find the missing partitions
|
||||
func findMissingPartitions(partitions []topic.Partition, ringSize int32) (missingPartitions []topic.Partition) {
|
||||
// sort the partitions by range start
|
||||
sort.Slice(partitions, func(i, j int) bool {
|
||||
return partitions[i].RangeStart < partitions[j].RangeStart
|
||||
})
|
||||
|
||||
// calculate the average partition size
|
||||
var covered int32
|
||||
for _, partition := range partitions {
|
||||
covered += partition.RangeStop - partition.RangeStart
|
||||
}
|
||||
averagePartitionSize := covered / int32(len(partitions))
|
||||
|
||||
// find the missing partitions
|
||||
var coveredWatermark int32
|
||||
i := 0
|
||||
for i < len(partitions) {
|
||||
partition := partitions[i]
|
||||
if partition.RangeStart > coveredWatermark {
|
||||
upperBound := mathutil.MinInt32(coveredWatermark+averagePartitionSize, partition.RangeStart)
|
||||
missingPartitions = append(missingPartitions, topic.Partition{
|
||||
RangeStart: coveredWatermark,
|
||||
RangeStop: upperBound,
|
||||
RingSize: ringSize,
|
||||
})
|
||||
coveredWatermark = upperBound
|
||||
if coveredWatermark == partition.RangeStop {
|
||||
i++
|
||||
}
|
||||
} else {
|
||||
coveredWatermark = partition.RangeStop
|
||||
i++
|
||||
}
|
||||
}
|
||||
for coveredWatermark < ringSize {
|
||||
upperBound := mathutil.MinInt32(coveredWatermark+averagePartitionSize, ringSize)
|
||||
missingPartitions = append(missingPartitions, topic.Partition{
|
||||
RangeStart: coveredWatermark,
|
||||
RangeStop: upperBound,
|
||||
RingSize: ringSize,
|
||||
})
|
||||
coveredWatermark = upperBound
|
||||
}
|
||||
return missingPartitions
|
||||
}
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
package pub_balancer
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
|
||||
)
|
||||
|
||||
func Test_findMissingPartitions(t *testing.T) {
|
||||
type args struct {
|
||||
partitions []topic.Partition
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantMissingPartitions []topic.Partition
|
||||
}{
|
||||
{
|
||||
name: "one partition",
|
||||
args: args{
|
||||
partitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 0, RangeStop: 1024},
|
||||
},
|
||||
},
|
||||
wantMissingPartitions: nil,
|
||||
},
|
||||
{
|
||||
name: "two partitions",
|
||||
args: args{
|
||||
partitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 0, RangeStop: 512},
|
||||
{RingSize: 1024, RangeStart: 512, RangeStop: 1024},
|
||||
},
|
||||
},
|
||||
wantMissingPartitions: nil,
|
||||
},
|
||||
{
|
||||
name: "four partitions, missing last two",
|
||||
args: args{
|
||||
partitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 0, RangeStop: 256},
|
||||
{RingSize: 1024, RangeStart: 256, RangeStop: 512},
|
||||
},
|
||||
},
|
||||
wantMissingPartitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 512, RangeStop: 768},
|
||||
{RingSize: 1024, RangeStart: 768, RangeStop: 1024},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "four partitions, missing first two",
|
||||
args: args{
|
||||
partitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 512, RangeStop: 768},
|
||||
{RingSize: 1024, RangeStart: 768, RangeStop: 1024},
|
||||
},
|
||||
},
|
||||
wantMissingPartitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 0, RangeStop: 256},
|
||||
{RingSize: 1024, RangeStart: 256, RangeStop: 512},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "four partitions, missing middle two",
|
||||
args: args{
|
||||
partitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 0, RangeStop: 256},
|
||||
{RingSize: 1024, RangeStart: 768, RangeStop: 1024},
|
||||
},
|
||||
},
|
||||
wantMissingPartitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 256, RangeStop: 512},
|
||||
{RingSize: 1024, RangeStart: 512, RangeStop: 768},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "four partitions, missing three",
|
||||
args: args{
|
||||
partitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 512, RangeStop: 768},
|
||||
},
|
||||
},
|
||||
wantMissingPartitions: []topic.Partition{
|
||||
{RingSize: 1024, RangeStart: 0, RangeStop: 256},
|
||||
{RingSize: 1024, RangeStart: 256, RangeStop: 512},
|
||||
{RingSize: 1024, RangeStart: 768, RangeStop: 1024},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if gotMissingPartitions := findMissingPartitions(tt.args.partitions, 1024); !reflect.DeepEqual(gotMissingPartitions, tt.wantMissingPartitions) {
|
||||
t.Errorf("findMissingPartitions() = %v, want %v", gotMissingPartitions, tt.wantMissingPartitions)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
package segment
|
||||
|
||||
import (
|
||||
flatbuffers "github.com/google/flatbuffers/go"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/message_fbs"
|
||||
)
|
||||
|
||||
type MessageBatchBuilder struct {
|
||||
b *flatbuffers.Builder
|
||||
producerId int32
|
||||
producerEpoch int32
|
||||
segmentId int32
|
||||
flags int32
|
||||
messageOffsets []flatbuffers.UOffsetT
|
||||
segmentSeqBase int64
|
||||
segmentSeqLast int64
|
||||
tsMsBase int64
|
||||
tsMsLast int64
|
||||
}
|
||||
|
||||
func NewMessageBatchBuilder(b *flatbuffers.Builder,
|
||||
producerId int32,
|
||||
producerEpoch int32,
|
||||
segmentId int32,
|
||||
flags int32) *MessageBatchBuilder {
|
||||
|
||||
b.Reset()
|
||||
|
||||
return &MessageBatchBuilder{
|
||||
b: b,
|
||||
producerId: producerId,
|
||||
producerEpoch: producerEpoch,
|
||||
segmentId: segmentId,
|
||||
flags: flags,
|
||||
}
|
||||
}
|
||||
|
||||
func (builder *MessageBatchBuilder) AddMessage(segmentSeq int64, tsMs int64, properties map[string][]byte, key []byte, value []byte) {
|
||||
if builder.segmentSeqBase == 0 {
|
||||
builder.segmentSeqBase = segmentSeq
|
||||
}
|
||||
builder.segmentSeqLast = segmentSeq
|
||||
if builder.tsMsBase == 0 {
|
||||
builder.tsMsBase = tsMs
|
||||
}
|
||||
builder.tsMsLast = tsMs
|
||||
|
||||
var names, values, pairs []flatbuffers.UOffsetT
|
||||
for k, v := range properties {
|
||||
names = append(names, builder.b.CreateString(k))
|
||||
values = append(values, builder.b.CreateByteVector(v))
|
||||
}
|
||||
for i, _ := range names {
|
||||
message_fbs.NameValueStart(builder.b)
|
||||
message_fbs.NameValueAddName(builder.b, names[i])
|
||||
message_fbs.NameValueAddValue(builder.b, values[i])
|
||||
pair := message_fbs.NameValueEnd(builder.b)
|
||||
pairs = append(pairs, pair)
|
||||
}
|
||||
|
||||
message_fbs.MessageStartPropertiesVector(builder.b, len(properties))
|
||||
for i := len(pairs) - 1; i >= 0; i-- {
|
||||
builder.b.PrependUOffsetT(pairs[i])
|
||||
}
|
||||
propOffset := builder.b.EndVector(len(properties))
|
||||
|
||||
keyOffset := builder.b.CreateByteVector(key)
|
||||
valueOffset := builder.b.CreateByteVector(value)
|
||||
|
||||
message_fbs.MessageStart(builder.b)
|
||||
message_fbs.MessageAddSeqDelta(builder.b, int32(segmentSeq-builder.segmentSeqBase))
|
||||
message_fbs.MessageAddTsMsDelta(builder.b, int32(tsMs-builder.tsMsBase))
|
||||
|
||||
message_fbs.MessageAddProperties(builder.b, propOffset)
|
||||
message_fbs.MessageAddKey(builder.b, keyOffset)
|
||||
message_fbs.MessageAddData(builder.b, valueOffset)
|
||||
messageOffset := message_fbs.MessageEnd(builder.b)
|
||||
|
||||
builder.messageOffsets = append(builder.messageOffsets, messageOffset)
|
||||
|
||||
}
|
||||
|
||||
func (builder *MessageBatchBuilder) BuildMessageBatch() {
|
||||
message_fbs.MessageBatchStartMessagesVector(builder.b, len(builder.messageOffsets))
|
||||
for i := len(builder.messageOffsets) - 1; i >= 0; i-- {
|
||||
builder.b.PrependUOffsetT(builder.messageOffsets[i])
|
||||
}
|
||||
messagesOffset := builder.b.EndVector(len(builder.messageOffsets))
|
||||
|
||||
message_fbs.MessageBatchStart(builder.b)
|
||||
message_fbs.MessageBatchAddProducerId(builder.b, builder.producerId)
|
||||
message_fbs.MessageBatchAddProducerEpoch(builder.b, builder.producerEpoch)
|
||||
message_fbs.MessageBatchAddSegmentId(builder.b, builder.segmentId)
|
||||
message_fbs.MessageBatchAddFlags(builder.b, builder.flags)
|
||||
message_fbs.MessageBatchAddSegmentSeqBase(builder.b, builder.segmentSeqBase)
|
||||
message_fbs.MessageBatchAddSegmentSeqMaxDelta(builder.b, int32(builder.segmentSeqLast-builder.segmentSeqBase))
|
||||
message_fbs.MessageBatchAddTsMsBase(builder.b, builder.tsMsBase)
|
||||
message_fbs.MessageBatchAddTsMsMaxDelta(builder.b, int32(builder.tsMsLast-builder.tsMsBase))
|
||||
|
||||
message_fbs.MessageBatchAddMessages(builder.b, messagesOffset)
|
||||
|
||||
messageBatch := message_fbs.MessageBatchEnd(builder.b)
|
||||
|
||||
builder.b.Finish(messageBatch)
|
||||
}
|
||||
|
||||
func (builder *MessageBatchBuilder) GetBytes() []byte {
|
||||
return builder.b.FinishedBytes()
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
package segment
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
flatbuffers "github.com/google/flatbuffers/go"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/message_fbs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMessageSerde(t *testing.T) {
|
||||
b := flatbuffers.NewBuilder(1024)
|
||||
|
||||
prop := make(map[string][]byte)
|
||||
prop["n1"] = []byte("v1")
|
||||
prop["n2"] = []byte("v2")
|
||||
|
||||
bb := NewMessageBatchBuilder(b, 1, 2, 3, 4)
|
||||
|
||||
bb.AddMessage(5, 6, prop, []byte("the primary key"), []byte("body is here"))
|
||||
bb.AddMessage(5, 7, prop, []byte("the primary 2"), []byte("body is 2"))
|
||||
|
||||
bb.BuildMessageBatch()
|
||||
|
||||
buf := bb.GetBytes()
|
||||
|
||||
println("serialized size", len(buf))
|
||||
|
||||
mb := message_fbs.GetRootAsMessageBatch(buf, 0)
|
||||
|
||||
assert.Equal(t, int32(1), mb.ProducerId())
|
||||
assert.Equal(t, int32(2), mb.ProducerEpoch())
|
||||
assert.Equal(t, int32(3), mb.SegmentId())
|
||||
assert.Equal(t, int32(4), mb.Flags())
|
||||
assert.Equal(t, int64(5), mb.SegmentSeqBase())
|
||||
assert.Equal(t, int32(0), mb.SegmentSeqMaxDelta())
|
||||
assert.Equal(t, int64(6), mb.TsMsBase())
|
||||
assert.Equal(t, int32(1), mb.TsMsMaxDelta())
|
||||
|
||||
assert.Equal(t, 2, mb.MessagesLength())
|
||||
|
||||
m := &message_fbs.Message{}
|
||||
mb.Messages(m, 0)
|
||||
|
||||
/*
|
||||
// the vector seems not consistent
|
||||
nv := &message_fbs.NameValue{}
|
||||
m.Properties(nv, 0)
|
||||
assert.Equal(t, "n1", string(nv.Name()))
|
||||
assert.Equal(t, "v1", string(nv.Value()))
|
||||
m.Properties(nv, 1)
|
||||
assert.Equal(t, "n2", string(nv.Name()))
|
||||
assert.Equal(t, "v2", string(nv.Value()))
|
||||
*/
|
||||
assert.Equal(t, []byte("the primary key"), m.Key())
|
||||
assert.Equal(t, []byte("body is here"), m.Data())
|
||||
|
||||
assert.Equal(t, int32(0), m.SeqDelta())
|
||||
assert.Equal(t, int32(0), m.TsMsDelta())
|
||||
|
||||
}
|
||||
@@ -28,28 +28,6 @@ func (imt *InflightMessageTracker) EnflightMessage(key []byte, tsNs int64) {
|
||||
imt.timestamps.EnflightTimestamp(tsNs)
|
||||
}
|
||||
|
||||
// IsMessageAcknowledged returns true if the message has been acknowledged.
|
||||
// If the message is older than the oldest inflight messages, returns false.
|
||||
// returns false if the message is inflight.
|
||||
// Otherwise, returns false if the message is old and can be ignored.
|
||||
func (imt *InflightMessageTracker) IsMessageAcknowledged(key []byte, tsNs int64) bool {
|
||||
imt.mu.Lock()
|
||||
defer imt.mu.Unlock()
|
||||
|
||||
if tsNs <= imt.timestamps.OldestAckedTimestamp() {
|
||||
return true
|
||||
}
|
||||
if tsNs > imt.timestamps.Latest() {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, found := imt.messages[string(key)]; found {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// AcknowledgeMessage acknowledges the message with the key and timestamp.
|
||||
func (imt *InflightMessageTracker) AcknowledgeMessage(key []byte, tsNs int64) bool {
|
||||
// fmt.Printf("AcknowledgeMessage(%s,%d)\n", string(key), tsNs)
|
||||
@@ -164,8 +142,3 @@ func (rb *RingBuffer) AckTimestamp(timestamp int64) {
|
||||
func (rb *RingBuffer) OldestAckedTimestamp() int64 {
|
||||
return rb.maxAllAckedTs
|
||||
}
|
||||
|
||||
// Latest returns the most recently known timestamp in the ring buffer.
|
||||
func (rb *RingBuffer) Latest() int64 {
|
||||
return rb.maxTimestamp
|
||||
}
|
||||
|
||||
@@ -1,134 +0,0 @@
|
||||
package sub_coordinator
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRingBuffer(t *testing.T) {
|
||||
// Initialize a RingBuffer with capacity 5
|
||||
rb := NewRingBuffer(5)
|
||||
|
||||
// Add timestamps to the buffer
|
||||
timestamps := []int64{100, 200, 300, 400, 500}
|
||||
for _, ts := range timestamps {
|
||||
rb.EnflightTimestamp(ts)
|
||||
}
|
||||
|
||||
// Test Add method and buffer size
|
||||
expectedSize := 5
|
||||
if rb.size != expectedSize {
|
||||
t.Errorf("Expected buffer size %d, got %d", expectedSize, rb.size)
|
||||
}
|
||||
|
||||
assert.Equal(t, int64(0), rb.OldestAckedTimestamp())
|
||||
assert.Equal(t, int64(500), rb.Latest())
|
||||
|
||||
rb.AckTimestamp(200)
|
||||
assert.Equal(t, int64(0), rb.OldestAckedTimestamp())
|
||||
rb.AckTimestamp(100)
|
||||
assert.Equal(t, int64(200), rb.OldestAckedTimestamp())
|
||||
|
||||
rb.EnflightTimestamp(int64(600))
|
||||
rb.EnflightTimestamp(int64(700))
|
||||
|
||||
rb.AckTimestamp(500)
|
||||
assert.Equal(t, int64(200), rb.OldestAckedTimestamp())
|
||||
rb.AckTimestamp(400)
|
||||
assert.Equal(t, int64(200), rb.OldestAckedTimestamp())
|
||||
rb.AckTimestamp(300)
|
||||
assert.Equal(t, int64(500), rb.OldestAckedTimestamp())
|
||||
|
||||
assert.Equal(t, int64(700), rb.Latest())
|
||||
}
|
||||
|
||||
func TestInflightMessageTracker(t *testing.T) {
|
||||
// Initialize an InflightMessageTracker with capacity 5
|
||||
tracker := NewInflightMessageTracker(5)
|
||||
|
||||
// Add inflight messages
|
||||
key := []byte("1")
|
||||
timestamp := int64(1)
|
||||
tracker.EnflightMessage(key, timestamp)
|
||||
|
||||
// Test IsMessageAcknowledged method
|
||||
isOld := tracker.IsMessageAcknowledged(key, timestamp-10)
|
||||
if !isOld {
|
||||
t.Error("Expected message to be old")
|
||||
}
|
||||
|
||||
// Test AcknowledgeMessage method
|
||||
acked := tracker.AcknowledgeMessage(key, timestamp)
|
||||
if !acked {
|
||||
t.Error("Expected message to be acked")
|
||||
}
|
||||
if _, exists := tracker.messages[string(key)]; exists {
|
||||
t.Error("Expected message to be deleted after ack")
|
||||
}
|
||||
if tracker.timestamps.size != 0 {
|
||||
t.Error("Expected buffer size to be 0 after ack")
|
||||
}
|
||||
assert.Equal(t, timestamp, tracker.GetOldestAckedTimestamp())
|
||||
}
|
||||
|
||||
func TestInflightMessageTracker2(t *testing.T) {
|
||||
// Initialize an InflightMessageTracker with initial capacity 1
|
||||
tracker := NewInflightMessageTracker(1)
|
||||
|
||||
tracker.EnflightMessage([]byte("1"), int64(1))
|
||||
tracker.EnflightMessage([]byte("2"), int64(2))
|
||||
tracker.EnflightMessage([]byte("3"), int64(3))
|
||||
tracker.EnflightMessage([]byte("4"), int64(4))
|
||||
tracker.EnflightMessage([]byte("5"), int64(5))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1)))
|
||||
assert.Equal(t, int64(1), tracker.GetOldestAckedTimestamp())
|
||||
|
||||
// Test IsMessageAcknowledged method
|
||||
isAcked := tracker.IsMessageAcknowledged([]byte("2"), int64(2))
|
||||
if isAcked {
|
||||
t.Error("Expected message to be not acked")
|
||||
}
|
||||
|
||||
// Test AcknowledgeMessage method
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2)))
|
||||
assert.Equal(t, int64(2), tracker.GetOldestAckedTimestamp())
|
||||
|
||||
}
|
||||
|
||||
func TestInflightMessageTracker3(t *testing.T) {
|
||||
// Initialize an InflightMessageTracker with initial capacity 1
|
||||
tracker := NewInflightMessageTracker(1)
|
||||
|
||||
tracker.EnflightMessage([]byte("1"), int64(1))
|
||||
tracker.EnflightMessage([]byte("2"), int64(2))
|
||||
tracker.EnflightMessage([]byte("3"), int64(3))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1)))
|
||||
tracker.EnflightMessage([]byte("4"), int64(4))
|
||||
tracker.EnflightMessage([]byte("5"), int64(5))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2)))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("3"), int64(3)))
|
||||
tracker.EnflightMessage([]byte("6"), int64(6))
|
||||
tracker.EnflightMessage([]byte("7"), int64(7))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("4"), int64(4)))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("5"), int64(5)))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("6"), int64(6)))
|
||||
assert.Equal(t, int64(6), tracker.GetOldestAckedTimestamp())
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("7"), int64(7)))
|
||||
assert.Equal(t, int64(7), tracker.GetOldestAckedTimestamp())
|
||||
|
||||
}
|
||||
|
||||
func TestInflightMessageTracker4(t *testing.T) {
|
||||
// Initialize an InflightMessageTracker with initial capacity 1
|
||||
tracker := NewInflightMessageTracker(1)
|
||||
|
||||
tracker.EnflightMessage([]byte("1"), int64(1))
|
||||
tracker.EnflightMessage([]byte("2"), int64(2))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1)))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2)))
|
||||
tracker.EnflightMessage([]byte("3"), int64(3))
|
||||
assert.True(t, tracker.AcknowledgeMessage([]byte("3"), int64(3)))
|
||||
assert.Equal(t, int64(3), tracker.GetOldestAckedTimestamp())
|
||||
|
||||
}
|
||||
@@ -1,130 +1,6 @@
|
||||
package sub_coordinator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
|
||||
)
|
||||
|
||||
type PartitionConsumerMapping struct {
|
||||
currentMapping *PartitionSlotToConsumerInstanceList
|
||||
prevMappings []*PartitionSlotToConsumerInstanceList
|
||||
}
|
||||
|
||||
// Balance goal:
|
||||
// 1. max processing power utilization
|
||||
// 2. allow one consumer instance to be down unexpectedly
|
||||
// without affecting the processing power utilization
|
||||
|
||||
func (pcm *PartitionConsumerMapping) BalanceToConsumerInstances(partitionSlotToBrokerList *pub_balancer.PartitionSlotToBrokerList, consumerInstances []*ConsumerGroupInstance) {
|
||||
if len(partitionSlotToBrokerList.PartitionSlots) == 0 || len(consumerInstances) == 0 {
|
||||
return
|
||||
}
|
||||
newMapping := NewPartitionSlotToConsumerInstanceList(partitionSlotToBrokerList.RingSize, time.Now())
|
||||
var prevMapping *PartitionSlotToConsumerInstanceList
|
||||
if len(pcm.prevMappings) > 0 {
|
||||
prevMapping = pcm.prevMappings[len(pcm.prevMappings)-1]
|
||||
} else {
|
||||
prevMapping = nil
|
||||
}
|
||||
newMapping.PartitionSlots = doBalanceSticky(partitionSlotToBrokerList.PartitionSlots, consumerInstances, prevMapping)
|
||||
if pcm.currentMapping != nil {
|
||||
pcm.prevMappings = append(pcm.prevMappings, pcm.currentMapping)
|
||||
if len(pcm.prevMappings) > 10 {
|
||||
pcm.prevMappings = pcm.prevMappings[1:]
|
||||
}
|
||||
}
|
||||
pcm.currentMapping = newMapping
|
||||
}
|
||||
|
||||
func doBalanceSticky(partitions []*pub_balancer.PartitionSlotToBroker, consumerInstances []*ConsumerGroupInstance, prevMapping *PartitionSlotToConsumerInstanceList) (partitionSlots []*PartitionSlotToConsumerInstance) {
|
||||
// collect previous consumer instance ids
|
||||
prevConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{})
|
||||
if prevMapping != nil {
|
||||
for _, prevPartitionSlot := range prevMapping.PartitionSlots {
|
||||
if prevPartitionSlot.AssignedInstanceId != "" {
|
||||
prevConsumerInstanceIds[prevPartitionSlot.AssignedInstanceId] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
// collect current consumer instance ids
|
||||
currConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{})
|
||||
for _, consumerInstance := range consumerInstances {
|
||||
currConsumerInstanceIds[consumerInstance.InstanceId] = struct{}{}
|
||||
}
|
||||
|
||||
// check deleted consumer instances
|
||||
deletedConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{})
|
||||
for consumerInstanceId := range prevConsumerInstanceIds {
|
||||
if _, ok := currConsumerInstanceIds[consumerInstanceId]; !ok {
|
||||
deletedConsumerInstanceIds[consumerInstanceId] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// convert partition slots from list to a map
|
||||
prevPartitionSlotMap := make(map[string]*PartitionSlotToConsumerInstance)
|
||||
if prevMapping != nil {
|
||||
for _, partitionSlot := range prevMapping.PartitionSlots {
|
||||
key := fmt.Sprintf("%d-%d", partitionSlot.RangeStart, partitionSlot.RangeStop)
|
||||
prevPartitionSlotMap[key] = partitionSlot
|
||||
}
|
||||
}
|
||||
|
||||
// make a copy of old mapping, skipping the deleted consumer instances
|
||||
newPartitionSlots := make([]*PartitionSlotToConsumerInstance, 0, len(partitions))
|
||||
for _, partition := range partitions {
|
||||
newPartitionSlots = append(newPartitionSlots, &PartitionSlotToConsumerInstance{
|
||||
RangeStart: partition.RangeStart,
|
||||
RangeStop: partition.RangeStop,
|
||||
UnixTimeNs: partition.UnixTimeNs,
|
||||
Broker: partition.AssignedBroker,
|
||||
FollowerBroker: partition.FollowerBroker,
|
||||
})
|
||||
}
|
||||
for _, newPartitionSlot := range newPartitionSlots {
|
||||
key := fmt.Sprintf("%d-%d", newPartitionSlot.RangeStart, newPartitionSlot.RangeStop)
|
||||
if prevPartitionSlot, ok := prevPartitionSlotMap[key]; ok {
|
||||
if _, ok := deletedConsumerInstanceIds[prevPartitionSlot.AssignedInstanceId]; !ok {
|
||||
newPartitionSlot.AssignedInstanceId = prevPartitionSlot.AssignedInstanceId
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// for all consumer instances, count the average number of partitions
|
||||
// that are assigned to them
|
||||
consumerInstancePartitionCount := make(map[ConsumerGroupInstanceId]int)
|
||||
for _, newPartitionSlot := range newPartitionSlots {
|
||||
if newPartitionSlot.AssignedInstanceId != "" {
|
||||
consumerInstancePartitionCount[newPartitionSlot.AssignedInstanceId]++
|
||||
}
|
||||
}
|
||||
// average number of partitions that are assigned to each consumer instance
|
||||
averageConsumerInstanceLoad := float32(len(partitions)) / float32(len(consumerInstances))
|
||||
|
||||
// assign unassigned partition slots to consumer instances that is underloaded
|
||||
consumerInstanceIdsIndex := 0
|
||||
for _, newPartitionSlot := range newPartitionSlots {
|
||||
if newPartitionSlot.AssignedInstanceId == "" {
|
||||
for avoidDeadLoop := len(consumerInstances); avoidDeadLoop > 0; avoidDeadLoop-- {
|
||||
consumerInstance := consumerInstances[consumerInstanceIdsIndex]
|
||||
if float32(consumerInstancePartitionCount[consumerInstance.InstanceId]) < averageConsumerInstanceLoad {
|
||||
newPartitionSlot.AssignedInstanceId = consumerInstance.InstanceId
|
||||
consumerInstancePartitionCount[consumerInstance.InstanceId]++
|
||||
consumerInstanceIdsIndex++
|
||||
if consumerInstanceIdsIndex >= len(consumerInstances) {
|
||||
consumerInstanceIdsIndex = 0
|
||||
}
|
||||
break
|
||||
} else {
|
||||
consumerInstanceIdsIndex++
|
||||
if consumerInstanceIdsIndex >= len(consumerInstances) {
|
||||
consumerInstanceIdsIndex = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return newPartitionSlots
|
||||
}

@@ -1,385 +0,0 @@
package sub_coordinator

import (
	"reflect"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
)

func Test_doBalanceSticky(t *testing.T) {
	type args struct {
		partitions []*pub_balancer.PartitionSlotToBroker
		consumerInstanceIds []*ConsumerGroupInstance
		prevMapping *PartitionSlotToConsumerInstanceList
	}
	tests := []struct {
		name string
		args args
		wantPartitionSlots []*PartitionSlotToConsumerInstance
	}{
		{
			name: "1 consumer instance, 1 partition",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "2 consumer instances, 1 partition",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "1 consumer instance, 2 partitions",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 50,
					},
					{
						RangeStart: 50,
						RangeStop: 100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart: 50,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 50,
					},
					{
						RangeStart: 50,
						RangeStop: 100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: nil,
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart: 50,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-2",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 deleted consumer instance",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 50,
					},
					{
						RangeStart: 50,
						RangeStop: 100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart: 0,
							RangeStop: 50,
							AssignedInstanceId: "consumer-instance-3",
						},
						{
							RangeStart: 50,
							RangeStop: 100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart: 50,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-2",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 new consumer instance",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 50,
					},
					{
						RangeStart: 50,
						RangeStop: 100,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-2",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-3",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart: 0,
							RangeStop: 50,
							AssignedInstanceId: "consumer-instance-3",
						},
						{
							RangeStart: 50,
							RangeStop: 100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 50,
					AssignedInstanceId: "consumer-instance-3",
				},
				{
					RangeStart: 50,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-2",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 new partition",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 50,
					},
					{
						RangeStart: 50,
						RangeStop: 100,
					},
					{
						RangeStart: 100,
						RangeStop: 150,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-2",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart: 0,
							RangeStop: 50,
							AssignedInstanceId: "consumer-instance-1",
						},
						{
							RangeStart: 50,
							RangeStop: 100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart: 50,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-2",
				},
				{
					RangeStart: 100,
					RangeStop: 150,
					AssignedInstanceId: "consumer-instance-1",
				},
			},
		},
		{
			name: "2 consumer instances, 2 partitions, 1 new partition, 1 new consumer instance",
			args: args{
				partitions: []*pub_balancer.PartitionSlotToBroker{
					{
						RangeStart: 0,
						RangeStop: 50,
					},
					{
						RangeStart: 50,
						RangeStop: 100,
					},
					{
						RangeStart: 100,
						RangeStop: 150,
					},
				},
				consumerInstanceIds: []*ConsumerGroupInstance{
					{
						InstanceId: "consumer-instance-1",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-2",
						MaxPartitionCount: 1,
					},
					{
						InstanceId: "consumer-instance-3",
						MaxPartitionCount: 1,
					},
				},
				prevMapping: &PartitionSlotToConsumerInstanceList{
					PartitionSlots: []*PartitionSlotToConsumerInstance{
						{
							RangeStart: 0,
							RangeStop: 50,
							AssignedInstanceId: "consumer-instance-1",
						},
						{
							RangeStart: 50,
							RangeStop: 100,
							AssignedInstanceId: "consumer-instance-2",
						},
					},
				},
			},
			wantPartitionSlots: []*PartitionSlotToConsumerInstance{
				{
					RangeStart: 0,
					RangeStop: 50,
					AssignedInstanceId: "consumer-instance-1",
				},
				{
					RangeStart: 50,
					RangeStop: 100,
					AssignedInstanceId: "consumer-instance-2",
				},
				{
					RangeStart: 100,
					RangeStop: 150,
					AssignedInstanceId: "consumer-instance-3",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if gotPartitionSlots := doBalanceSticky(tt.args.partitions, tt.args.consumerInstanceIds, tt.args.prevMapping); !reflect.DeepEqual(gotPartitionSlots, tt.wantPartitionSlots) {
				t.Errorf("doBalanceSticky() = %v, want %v", gotPartitionSlots, tt.wantPartitionSlots)
			}
		})
	}
}
@@ -1,7 +1,5 @@
package sub_coordinator

import "time"

type PartitionSlotToConsumerInstance struct {
	RangeStart int32
	RangeStop int32
@@ -16,10 +14,3 @@ type PartitionSlotToConsumerInstanceList struct {
	RingSize int32
	Version int64
}

func NewPartitionSlotToConsumerInstanceList(ringSize int32, version time.Time) *PartitionSlotToConsumerInstanceList {
	return &PartitionSlotToConsumerInstanceList{
		RingSize: ringSize,
		Version: version.UnixNano(),
	}
}
@@ -90,22 +90,3 @@ type OffsetAwarePublisher struct {
	partition *LocalPartition
	assignOffsetFn OffsetAssignmentFunc
}

// NewOffsetAwarePublisher creates a new offset-aware publisher
func NewOffsetAwarePublisher(partition *LocalPartition, assignOffsetFn OffsetAssignmentFunc) *OffsetAwarePublisher {
	return &OffsetAwarePublisher{
		partition: partition,
		assignOffsetFn: assignOffsetFn,
	}
}

// Publish publishes a message with automatic offset assignment
func (oap *OffsetAwarePublisher) Publish(message *mq_pb.DataMessage) error {
	_, err := oap.partition.PublishWithOffset(message, oap.assignOffsetFn)
	return err
}

// GetPartition returns the underlying partition
func (oap *OffsetAwarePublisher) GetPartition() *LocalPartition {
	return oap.partition
}
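As a quick sketch of how the OffsetAwarePublisher helpers removed here were meant to be used (hypothetical caller; localPartition and assignOffsetFn are assumed to exist, and the DataMessage key/value fields are illustrative):

	publisher := NewOffsetAwarePublisher(localPartition, assignOffsetFn)
	if err := publisher.Publish(&mq_pb.DataMessage{
		Key:   []byte("k"),
		Value: []byte("v"),
	}); err != nil {
		// handle the publish error
	}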
@@ -16,15 +16,6 @@ type Partition struct {
	UnixTimeNs int64 // in nanoseconds
}

func NewPartition(rangeStart, rangeStop, ringSize int32, unixTimeNs int64) *Partition {
	return &Partition{
		RangeStart: rangeStart,
		RangeStop: rangeStop,
		RingSize: ringSize,
		UnixTimeNs: unixTimeNs,
	}
}

func (partition Partition) Equals(other Partition) bool {
	if partition.RangeStart != other.RangeStart {
		return false
@@ -57,24 +48,6 @@ func FromPbPartition(partition *schema_pb.Partition) Partition {
	}
}

func SplitPartitions(targetCount int32, ts int64) []*Partition {
	partitions := make([]*Partition, 0, targetCount)
	partitionSize := PartitionCount / targetCount
	for i := int32(0); i < targetCount; i++ {
		partitionStop := (i + 1) * partitionSize
		if i == targetCount-1 {
			partitionStop = PartitionCount
		}
		partitions = append(partitions, &Partition{
			RangeStart: i * partitionSize,
			RangeStop: partitionStop,
			RingSize: PartitionCount,
			UnixTimeNs: ts,
		})
	}
	return partitions
}
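For reference, SplitPartitions above cuts the ring into targetCount equal slices and clamps the last RangeStop to PartitionCount. A worked example, assuming purely for illustration that PartitionCount were 1024:

	// SplitPartitions(3, ts): partitionSize = 1024 / 3 = 341
	// yielding RangeStart/RangeStop pairs 0-341, 341-682, 682-1024
	// (the final stop is clamped to PartitionCount rather than 3*341 = 1023)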

func (partition Partition) ToPbPartition() *schema_pb.Partition {
	return &schema_pb.Partition{
		RangeStart: partition.RangeStart,