go fmt
@@ -46,7 +46,6 @@ func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) {
return
}

-
c.Header("Content-Type", "text/html")
taskDetailComponent := app.TaskDetail(taskDetail)
layoutComponent := layout.Layout(c, taskDetailComponent)
@@ -306,25 +306,21 @@ func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *Maintenance
return false // Fallback to existing logic for unknown types
}

-
// Convert task objects
taskObject := s.convertTaskToTaskSystem(task)
if taskObject == nil {
return false
}

-
runningTaskObjects := s.convertTasksToTaskSystem(runningTasks)
workerObjects := s.convertWorkersToTaskSystem(availableWorkers)

-
// Get the appropriate scheduler
scheduler := s.taskRegistry.GetScheduler(taskType)
if scheduler == nil {
return false
}

-
canSchedule := scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects)

return canSchedule
@@ -2,11 +2,11 @@ package command

import (
"fmt"
-"os"
-"path/filepath"
"github.com/posener/complete"
completeinstall "github.com/posener/complete/cmd/install"
flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
+"os"
+"path/filepath"
"runtime"
)

@@ -53,14 +53,14 @@ func printAutocompleteScript(shell string) bool {
return false
}

switch shell {
case "bash":
fmt.Printf("complete -C %q weed\n", binPath)
case "zsh":
fmt.Printf("autoload -U +X bashcompinit && bashcompinit\n")
fmt.Printf("complete -o nospace -C %q weed\n", binPath)
case "fish":
fmt.Printf(`function __complete_weed
set -lx COMP_LINE (commandline -cp)
test -z (commandline -ct)
and set COMP_LINE "$COMP_LINE "
@@ -68,10 +68,10 @@ func printAutocompleteScript(shell string) bool {
end
complete -f -c weed -a "(__complete_weed)"
`, binPath)
default:
fmt.Fprintf(os.Stderr, "unsupported shell: %s. Supported shells: bash, zsh, fish\n", shell)
return false
}
return true
}

@@ -272,7 +272,6 @@ subscribeLoop:
TsNs: logEntry.TsNs,
}

-
if err := stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Data{
Data: dataMsg,
}}); err != nil {
@@ -103,15 +103,15 @@ func TestIncrementalCooperativeAssignmentStrategy_RebalanceWithRevocation(t *tes
t.Errorf("Expected member-2 to have 0 partitions during revocation, got %d", len(member2Assignments))
}

t.Logf("Revocation phase - Member-1: %d partitions, Member-2: %d partitions",
len(member1Assignments), len(member2Assignments))

// Simulate time passing and second call (should move to assignment phase)
time.Sleep(10 * time.Millisecond)

// Force move to assignment phase by setting timeout to 0
state.RevocationTimeout = 0

assignments2 := strategy.Assign(members, topicPartitions)

// Should complete rebalance
@@ -136,7 +136,7 @@ func TestIncrementalCooperativeAssignmentStrategy_RebalanceWithRevocation(t *tes
t.Errorf("Expected 4 total partitions after rebalance, got %d", totalFinalPartitions)
}

t.Logf("Final assignment - Member-1: %d partitions, Member-2: %d partitions",
len(member1FinalAssignments), len(member2FinalAssignments))
}

@@ -239,7 +239,7 @@ func TestIncrementalCooperativeAssignmentStrategy_MultipleTopics(t *testing.T) {
t.Errorf("Expected partition %s to be assigned", expected)
}
}

// Debug: Print all assigned partitions
t.Logf("All assigned partitions: %v", allAssignedPartitions)
}
@@ -390,7 +390,7 @@ func TestIncrementalCooperativeAssignmentStrategy_StateTransitions(t *testing.T)
// Force timeout to move to assignment phase
state.RevocationTimeout = 0
strategy.Assign(members, topicPartitions)

// Should complete and return to None
state = strategy.GetRebalanceState()
if state.Phase != RebalancePhaseNone {
@@ -24,12 +24,12 @@ func (rtm *RebalanceTimeoutManager) CheckRebalanceTimeouts() {

for _, group := range rtm.coordinator.groups {
group.Mu.Lock()

// Only check timeouts for groups in rebalancing states
if group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance {
rtm.checkGroupRebalanceTimeout(group, now)
}

group.Mu.Unlock()
}
}
@@ -37,7 +37,7 @@ func (rtm *RebalanceTimeoutManager) CheckRebalanceTimeouts() {
// checkGroupRebalanceTimeout checks and handles rebalance timeout for a specific group
func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGroup, now time.Time) {
expiredMembers := make([]string, 0)

for memberID, member := range group.Members {
// Check if member has exceeded its rebalance timeout
rebalanceTimeout := time.Duration(member.RebalanceTimeout) * time.Millisecond
@@ -45,21 +45,21 @@ func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGr
// Use default rebalance timeout if not specified
rebalanceTimeout = time.Duration(rtm.coordinator.rebalanceTimeoutMs) * time.Millisecond
}

// For members in pending state during rebalance, check against join time
if member.State == MemberStatePending {
if now.Sub(member.JoinedAt) > rebalanceTimeout {
expiredMembers = append(expiredMembers, memberID)
}
}

// Also check session timeout as a fallback
sessionTimeout := time.Duration(member.SessionTimeout) * time.Millisecond
if now.Sub(member.LastHeartbeat) > sessionTimeout {
expiredMembers = append(expiredMembers, memberID)
}
}

// Remove expired members and trigger rebalance if necessary
if len(expiredMembers) > 0 {
rtm.evictExpiredMembers(group, expiredMembers)
@@ -70,13 +70,13 @@ func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGr
func (rtm *RebalanceTimeoutManager) evictExpiredMembers(group *ConsumerGroup, expiredMembers []string) {
for _, memberID := range expiredMembers {
delete(group.Members, memberID)

// If the leader was evicted, clear leader
if group.Leader == memberID {
group.Leader = ""
}
}

// Update group state based on remaining members
if len(group.Members) == 0 {
group.State = GroupStateEmpty
@@ -92,18 +92,18 @@ func (rtm *RebalanceTimeoutManager) evictExpiredMembers(group *ConsumerGroup, ex
break
}
}

// Reset to preparing rebalance to restart the process
group.State = GroupStatePreparingRebalance
group.Generation++

// Mark remaining members as pending
for _, member := range group.Members {
member.State = MemberStatePending
}
}
}

group.LastActivity = time.Now()
}

@@ -112,7 +112,7 @@ func (rtm *RebalanceTimeoutManager) IsRebalanceStuck(group *ConsumerGroup, maxRe
if group.State != GroupStatePreparingRebalance && group.State != GroupStateCompletingRebalance {
return false
}

return time.Since(group.LastActivity) > maxRebalanceDuration
}

@@ -120,14 +120,14 @@ func (rtm *RebalanceTimeoutManager) IsRebalanceStuck(group *ConsumerGroup, maxRe
func (rtm *RebalanceTimeoutManager) ForceCompleteRebalance(group *ConsumerGroup) {
group.Mu.Lock()
defer group.Mu.Unlock()

// If stuck in preparing rebalance, move to completing
if group.State == GroupStatePreparingRebalance {
group.State = GroupStateCompletingRebalance
group.LastActivity = time.Now()
return
}

// If stuck in completing rebalance, force to stable
if group.State == GroupStateCompletingRebalance {
group.State = GroupStateStable
@@ -145,21 +145,21 @@ func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *Rebalanc
if group == nil {
return nil
}

group.Mu.RLock()
defer group.Mu.RUnlock()

status := &RebalanceStatus{
GroupID: groupID,
State: group.State,
Generation: group.Generation,
MemberCount: len(group.Members),
Leader: group.Leader,
LastActivity: group.LastActivity,
IsRebalancing: group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance,
RebalanceDuration: time.Since(group.LastActivity),
}

// Calculate member timeout status
now := time.Now()
for memberID, member := range group.Members {
@@ -171,48 +171,48 @@ func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *Rebalanc
SessionTimeout: time.Duration(member.SessionTimeout) * time.Millisecond,
RebalanceTimeout: time.Duration(member.RebalanceTimeout) * time.Millisecond,
}

// Calculate time until session timeout
sessionTimeRemaining := memberStatus.SessionTimeout - now.Sub(member.LastHeartbeat)
if sessionTimeRemaining < 0 {
sessionTimeRemaining = 0
}
memberStatus.SessionTimeRemaining = sessionTimeRemaining

// Calculate time until rebalance timeout
rebalanceTimeRemaining := memberStatus.RebalanceTimeout - now.Sub(member.JoinedAt)
if rebalanceTimeRemaining < 0 {
rebalanceTimeRemaining = 0
}
memberStatus.RebalanceTimeRemaining = rebalanceTimeRemaining

status.Members = append(status.Members, memberStatus)
}

return status
}

// RebalanceStatus represents the current status of a group's rebalance
type RebalanceStatus struct {
GroupID string `json:"group_id"`
State GroupState `json:"state"`
Generation int32 `json:"generation"`
MemberCount int `json:"member_count"`
Leader string `json:"leader"`
LastActivity time.Time `json:"last_activity"`
IsRebalancing bool `json:"is_rebalancing"`
RebalanceDuration time.Duration `json:"rebalance_duration"`
Members []MemberTimeoutStatus `json:"members"`
}

// MemberTimeoutStatus represents timeout status for a group member
type MemberTimeoutStatus struct {
MemberID string `json:"member_id"`
State MemberState `json:"state"`
LastHeartbeat time.Time `json:"last_heartbeat"`
JoinedAt time.Time `json:"joined_at"`
SessionTimeout time.Duration `json:"session_timeout"`
RebalanceTimeout time.Duration `json:"rebalance_timeout"`
SessionTimeRemaining time.Duration `json:"session_time_remaining"`
RebalanceTimeRemaining time.Duration `json:"rebalance_time_remaining"`
}
@@ -8,14 +8,14 @@ import (
func TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Create a group with a member that has a short rebalance timeout
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
group.State = GroupStatePreparingRebalance

member := &GroupMember{
ID: "member1",
ClientID: "client1",
@@ -27,15 +27,15 @@ func TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) {
}
group.Members["member1"] = member
group.Mu.Unlock()

// Check timeouts - member should be evicted
rtm.CheckRebalanceTimeouts()

group.Mu.RLock()
if len(group.Members) != 0 {
t.Errorf("Expected member to be evicted due to rebalance timeout, but %d members remain", len(group.Members))
}

if group.State != GroupStateEmpty {
t.Errorf("Expected group state to be Empty after member eviction, got %s", group.State.String())
}
@@ -45,18 +45,18 @@ func TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) {
func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Create a group with a member that has exceeded session timeout
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
group.State = GroupStatePreparingRebalance

member := &GroupMember{
ID: "member1",
ClientID: "client1",
SessionTimeout: 1000, // 1 second
RebalanceTimeout: 30000, // 30 seconds
State: MemberStatePending,
LastHeartbeat: time.Now().Add(-2 * time.Second), // Last heartbeat 2 seconds ago
@@ -64,10 +64,10 @@ func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) {
}
group.Members["member1"] = member
group.Mu.Unlock()

// Check timeouts - member should be evicted due to session timeout
rtm.CheckRebalanceTimeouts()

group.Mu.RLock()
if len(group.Members) != 0 {
t.Errorf("Expected member to be evicted due to session timeout, but %d members remain", len(group.Members))
@@ -78,15 +78,15 @@ func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) {
func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Create a group with leader and another member
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
group.State = GroupStatePreparingRebalance
group.Leader = "member1"

// Leader with expired rebalance timeout
leader := &GroupMember{
ID: "member1",
@@ -98,7 +98,7 @@ func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) {
JoinedAt: time.Now().Add(-2 * time.Second),
}
group.Members["member1"] = leader

// Another member that's still valid
member2 := &GroupMember{
ID: "member2",
@@ -111,19 +111,19 @@ func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) {
}
group.Members["member2"] = member2
group.Mu.Unlock()

// Check timeouts - leader should be evicted, new leader selected
rtm.CheckRebalanceTimeouts()

group.Mu.RLock()
if len(group.Members) != 1 {
t.Errorf("Expected 1 member to remain after leader eviction, got %d", len(group.Members))
}

if group.Leader != "member2" {
t.Errorf("Expected member2 to become new leader, got %s", group.Leader)
}

if group.State != GroupStatePreparingRebalance {
t.Errorf("Expected group to restart rebalancing after leader eviction, got %s", group.State.String())
}
@@ -133,37 +133,37 @@ func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) {
func TestRebalanceTimeoutManager_IsRebalanceStuck(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Create a group that's been rebalancing for a while
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
group.State = GroupStatePreparingRebalance
group.LastActivity = time.Now().Add(-15 * time.Minute) // 15 minutes ago
group.Mu.Unlock()

// Check if rebalance is stuck (max 10 minutes)
maxDuration := 10 * time.Minute
if !rtm.IsRebalanceStuck(group, maxDuration) {
t.Error("Expected rebalance to be detected as stuck")
}

// Test with a group that's not stuck
group.Mu.Lock()
group.LastActivity = time.Now().Add(-5 * time.Minute) // 5 minutes ago
group.Mu.Unlock()

if rtm.IsRebalanceStuck(group, maxDuration) {
t.Error("Expected rebalance to not be detected as stuck")
}

// Test with stable group (should not be stuck)
group.Mu.Lock()
group.State = GroupStateStable
group.LastActivity = time.Now().Add(-15 * time.Minute)
group.Mu.Unlock()

if rtm.IsRebalanceStuck(group, maxDuration) {
t.Error("Stable group should not be detected as stuck")
}
@@ -172,37 +172,37 @@ func TestRebalanceTimeoutManager_IsRebalanceStuck(t *testing.T) {
func TestRebalanceTimeoutManager_ForceCompleteRebalance(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Test forcing completion from PreparingRebalance
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
group.State = GroupStatePreparingRebalance

member := &GroupMember{
ID: "member1",
State: MemberStatePending,
}
group.Members["member1"] = member
group.Mu.Unlock()

rtm.ForceCompleteRebalance(group)

group.Mu.RLock()
if group.State != GroupStateCompletingRebalance {
t.Errorf("Expected group state to be CompletingRebalance, got %s", group.State.String())
}
group.Mu.RUnlock()

// Test forcing completion from CompletingRebalance
rtm.ForceCompleteRebalance(group)

group.Mu.RLock()
if group.State != GroupStateStable {
t.Errorf("Expected group state to be Stable, got %s", group.State.String())
}

if member.State != MemberStateStable {
t.Errorf("Expected member state to be Stable, got %s", member.State.String())
}
@@ -212,15 +212,15 @@ func TestRebalanceTimeoutManager_ForceCompleteRebalance(t *testing.T) {
func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Test with non-existent group
status := rtm.GetRebalanceStatus("non-existent")
if status != nil {
t.Error("Expected nil status for non-existent group")
}

// Create a group with members
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
@@ -228,7 +228,7 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) {
group.Generation = 5
group.Leader = "member1"
group.LastActivity = time.Now().Add(-2 * time.Minute)

member1 := &GroupMember{
ID: "member1",
State: MemberStatePending,
@@ -238,7 +238,7 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) {
RebalanceTimeout: 300000, // 5 minutes
}
group.Members["member1"] = member1

member2 := &GroupMember{
ID: "member2",
State: MemberStatePending,
@@ -249,48 +249,48 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) {
}
group.Members["member2"] = member2
group.Mu.Unlock()

// Get status
status = rtm.GetRebalanceStatus("test-group")

if status == nil {
t.Fatal("Expected non-nil status")
}

if status.GroupID != "test-group" {
t.Errorf("Expected group ID 'test-group', got %s", status.GroupID)
}

if status.State != GroupStatePreparingRebalance {
t.Errorf("Expected state PreparingRebalance, got %s", status.State.String())
}

if status.Generation != 5 {
t.Errorf("Expected generation 5, got %d", status.Generation)
}

if status.MemberCount != 2 {
t.Errorf("Expected 2 members, got %d", status.MemberCount)
}

if status.Leader != "member1" {
t.Errorf("Expected leader 'member1', got %s", status.Leader)
}

if !status.IsRebalancing {
t.Error("Expected IsRebalancing to be true")
}

if len(status.Members) != 2 {
t.Errorf("Expected 2 member statuses, got %d", len(status.Members))
}

// Check member timeout calculations
for _, memberStatus := range status.Members {
if memberStatus.SessionTimeRemaining < 0 {
t.Errorf("Session time remaining should not be negative for member %s", memberStatus.MemberID)
}

if memberStatus.RebalanceTimeRemaining < 0 {
t.Errorf("Rebalance time remaining should not be negative for member %s", memberStatus.MemberID)
}
@@ -300,14 +300,14 @@ func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) {
func TestRebalanceTimeoutManager_DefaultRebalanceTimeout(t *testing.T) {
coordinator := NewGroupCoordinator()
defer coordinator.Close()

rtm := coordinator.rebalanceTimeoutManager

// Create a group with a member that has no rebalance timeout set (0)
group := coordinator.GetOrCreateGroup("test-group")
group.Mu.Lock()
group.State = GroupStatePreparingRebalance

member := &GroupMember{
ID: "member1",
ClientID: "client1",
@@ -319,10 +319,10 @@ func TestRebalanceTimeoutManager_DefaultRebalanceTimeout(t *testing.T) {
}
group.Members["member1"] = member
group.Mu.Unlock()

// Default rebalance timeout is 5 minutes (300000ms), so member should be evicted
rtm.CheckRebalanceTimeouts()

group.Mu.RLock()
if len(group.Members) != 0 {
t.Errorf("Expected member to be evicted using default rebalance timeout, but %d members remain", len(group.Members))
@@ -142,4 +142,3 @@ func (m *MemoryStorage) Close() error {

return nil
}
-
@@ -206,4 +206,3 @@ func TestMemoryStorageOverwrite(t *testing.T) {
assert.Equal(t, int64(20), offset)
assert.Equal(t, "meta2", metadata)
}
-
@@ -56,4 +56,3 @@ var (
ErrInvalidPartition = fmt.Errorf("invalid partition")
ErrStorageClosed = fmt.Errorf("storage is closed")
)
-
@@ -121,7 +121,6 @@ func (m *mockSeaweedMQHandler) ProduceRecord(ctx context.Context, topicName stri
offset := m.offsets[topicName][partitionID]
m.offsets[topicName][partitionID]++

-
// Store record
record := &mockRecord{
key: key,
@@ -9,5 +9,3 @@ package kafka
// - offset/: Offset management
// - schema/: Schema registry integration
// - consumer/: Consumer group coordination
-
-
@@ -51,5 +51,3 @@ func GetRangeSize() int32 {
func GetMaxKafkaPartitions() int32 {
return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions
}
-
-
@@ -37,7 +37,6 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16,
// Tagged fields at end of request
// (We don't parse them, just skip)

-
// Build response
response := make([]byte, 0, 256)

@@ -109,6 +108,5 @@ func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16,
// Response-level tagged fields (flexible response)
response = append(response, 0x00) // Empty tagged fields

-
return response, nil
}
@@ -268,7 +268,6 @@ func parseCompactString(data []byte) ([]byte, int) {
return nil, 0
}

-
if actualLength == 0 {
// Empty string (length was 1)
return []byte{}, consumed
@@ -107,13 +107,13 @@ func (h *Handler) describeGroup(groupID string) DescribeGroupsGroup {
}

return DescribeGroupsGroup{
ErrorCode: 0,
GroupID: groupID,
State: stateStr,
ProtocolType: "consumer", // Default protocol type
Protocol: group.Protocol,
Members: members,
AuthorizedOps: []int32{}, // Empty for now
}
}

@@ -175,8 +175,8 @@ func (h *Handler) listAllGroups(statesFilter []string) []ListGroupsGroup {
// Request/Response structures

type DescribeGroupsRequest struct {
GroupIDs []string
IncludeAuthorizedOps bool
}

type DescribeGroupsResponse struct {
@@ -661,7 +661,7 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error {
return
}
// Removed V(4) logging from hot path - only log errors and important events

// Wrap request processing with panic recovery to prevent deadlocks
// If processRequestSync panics, we MUST still send a response to avoid blocking the response writer
var response []byte
@@ -881,7 +881,6 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error {
return fmt.Errorf("read message: %w", err)
}

-
// Parse at least the basic header to get API key and correlation ID
if len(messageBuf) < 8 {
return fmt.Errorf("message too short")
@@ -890,7 +889,7 @@ func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error {
apiKey := binary.BigEndian.Uint16(messageBuf[0:2])
apiVersion := binary.BigEndian.Uint16(messageBuf[2:4])
correlationID := binary.BigEndian.Uint32(messageBuf[4:8])

// Validate API version against what we support
if err := h.validateAPIVersion(apiKey, apiVersion); err != nil {
glog.Errorf("API VERSION VALIDATION FAILED: Key=%d (%s), Version=%d, error=%v", apiKey, getAPIName(APIKey(apiKey)), apiVersion, err)
@@ -1050,7 +1049,6 @@ func (h *Handler) processRequestSync(req *kafkaRequest) ([]byte, error) {
requestStart := time.Now()
apiName := getAPIName(APIKey(req.apiKey))

-
// Only log high-volume requests at V(2), not V(4)
if glog.V(2) {
glog.V(2).Infof("[API] %s (key=%d, ver=%d, corr=%d)",
@@ -1589,15 +1587,15 @@ func (h *Handler) HandleMetadataV2(correlationID uint32, requestBody []byte) ([]
for partitionID := int32(0); partitionID < partitionCount; partitionID++ {
binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode
binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex
binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID

// ReplicaNodes array (4 bytes length + nodes)
binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica
binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1

// IsrNodes array (4 bytes length + nodes)
binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node
binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
}
}

@@ -1716,15 +1714,15 @@ func (h *Handler) HandleMetadataV3V4(correlationID uint32, requestBody []byte) (
for partitionID := int32(0); partitionID < partitionCount; partitionID++ {
binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode
binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex
binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID

// ReplicaNodes array (4 bytes length + nodes)
binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica
binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1

// IsrNodes array (4 bytes length + nodes)
binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node
binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1
}
}

@@ -1737,7 +1735,7 @@ func (h *Handler) HandleMetadataV3V4(correlationID uint32, requestBody []byte) (
}
if len(response) > 100 {
}

return response, nil
}

@@ -1828,7 +1826,6 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
// NOTE: Correlation ID is handled by writeResponseWithCorrelationID
// Do NOT include it in the response body

-
// ThrottleTimeMs (4 bytes) - v3+ addition
binary.Write(&buf, binary.BigEndian, int32(0)) // No throttling

@@ -1896,7 +1893,7 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
for partitionID := int32(0); partitionID < partitionCount; partitionID++ {
binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode
binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex
binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID

// LeaderEpoch (4 bytes) - v7+ addition
if apiVersion >= 7 {
@@ -1905,11 +1902,11 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,

// ReplicaNodes array (4 bytes length + nodes)
binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica
binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1

// IsrNodes array (4 bytes length + nodes)
binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node
binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1

// OfflineReplicas array (4 bytes length + nodes) - v5+ addition
binary.Write(&buf, binary.BigEndian, int32(0)) // No offline replicas
@@ -1930,7 +1927,7 @@ func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte,
}
if len(response) > 100 {
}

return response, nil
}

@@ -1994,12 +1991,11 @@ func (h *Handler) handleListOffsets(correlationID uint32, apiVersion uint16, req
// Parse minimal request to understand what's being asked (header already stripped)
offset := 0

-
maxBytes := len(requestBody)
if maxBytes > 64 {
maxBytes = 64
}

// v1+ has replica_id(4)
if apiVersion >= 1 {
if len(requestBody) < offset+4 {
@@ -3930,12 +3926,11 @@ func (h *Handler) handleInitProducerId(correlationID uint32, apiVersion uint16,
// v2+: transactional_id(NULLABLE_STRING) + transaction_timeout_ms(INT32) + producer_id(INT64) + producer_epoch(INT16)
// v4+: Uses flexible format with tagged fields

-
maxBytes := len(requestBody)
if maxBytes > 64 {
maxBytes = 64
}

offset := 0

// Parse transactional_id (NULLABLE_STRING or COMPACT_NULLABLE_STRING for flexible versions)
@@ -47,4 +47,3 @@ func (a *offsetStorageAdapter) DeleteGroup(group string) error {
func (a *offsetStorageAdapter) Close() error {
return a.storage.Close()
}
-
@@ -140,4 +140,3 @@ func TestMetadataResponseHasBrokers(t *testing.T) {

t.Logf("✓ Metadata response correctly has %d broker(s)", parsedCount)
}
-
@@ -7,46 +7,46 @@ import (

func TestParseConfluentEnvelope(t *testing.T) {
tests := []struct {
name string
input []byte
expectOK bool
expectID uint32
expectFormat Format
}{
{
name: "valid Avro message",
input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, // schema ID 1 + "Hello"
expectOK: true,
expectID: 1,
expectFormat: FormatAvro,
},
{
name: "valid message with larger schema ID",
input: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, // schema ID 1234 + "foo"
expectOK: true,
expectID: 1234,
expectFormat: FormatAvro,
},
{
name: "too short message",
input: []byte{0x00, 0x00, 0x00},
expectOK: false,
},
{
name: "no magic byte",
input: []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f},
expectOK: false,
},
{
name: "empty message",
input: []byte{},
expectOK: false,
},
{
name: "minimal valid message",
input: []byte{0x00, 0x00, 0x00, 0x00, 0x01}, // schema ID 1, empty payload
expectOK: true,
expectID: 1,
expectFormat: FormatAvro,
},
}
@@ -54,24 +54,24 @@ func TestParseConfluentEnvelope(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
envelope, ok := ParseConfluentEnvelope(tt.input)

if ok != tt.expectOK {
t.Errorf("ParseConfluentEnvelope() ok = %v, want %v", ok, tt.expectOK)
return
}

if !tt.expectOK {
return // No need to check further if we expected failure
}

if envelope.SchemaID != tt.expectID {
t.Errorf("ParseConfluentEnvelope() schemaID = %v, want %v", envelope.SchemaID, tt.expectID)
}

if envelope.Format != tt.expectFormat {
t.Errorf("ParseConfluentEnvelope() format = %v, want %v", envelope.Format, tt.expectFormat)
}

// Verify payload extraction
expectedPayloadLen := len(tt.input) - 5 // 5 bytes for magic + schema ID
if len(envelope.Payload) != expectedPayloadLen {
@@ -150,11 +150,11 @@ func TestExtractSchemaID(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
id, ok := ExtractSchemaID(tt.input)

if ok != tt.expectOK {
t.Errorf("ExtractSchemaID() ok = %v, want %v", ok, tt.expectOK)
}

if id != tt.expectID {
t.Errorf("ExtractSchemaID() id = %v, want %v", id, tt.expectID)
}
@@ -200,12 +200,12 @@ func TestCreateConfluentEnvelope(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := CreateConfluentEnvelope(tt.format, tt.schemaID, tt.indexes, tt.payload)

if len(result) != len(tt.expected) {
t.Errorf("CreateConfluentEnvelope() length = %v, want %v", len(result), len(tt.expected))
return
}

for i, b := range result {
if b != tt.expected[i] {
t.Errorf("CreateConfluentEnvelope() byte[%d] = %v, want %v", i, b, tt.expected[i])
@@ -262,7 +262,7 @@ func TestEnvelopeValidate(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.envelope.Validate()

if (err != nil) != tt.expectErr {
t.Errorf("Envelope.Validate() error = %v, expectErr %v", err, tt.expectErr)
}
@@ -297,7 +297,7 @@ func TestEnvelopeMetadata(t *testing.T) {
func BenchmarkParseConfluentEnvelope(b *testing.B) {
// Create a test message
testMsg := make([]byte, 1024)
testMsg[0] = 0x00 // Magic byte
binary.BigEndian.PutUint32(testMsg[1:5], 123) // Schema ID
// Fill rest with dummy data
for i := 5; i < len(testMsg); i++ {
@@ -100,7 +100,7 @@ func TestCreateConfluentEnvelopeWithProtobufIndexes(t *testing.T) {
parsed, ok := ParseConfluentEnvelope(envelope)
require.True(t, ok, "Should be able to parse envelope")
assert.Equal(t, tc.schemaID, parsed.SchemaID)

if tc.format == FormatProtobuf && len(tc.indexes) == 0 {
// For Protobuf without indexes, payload should match
assert.Equal(t, tc.payload, parsed.Payload, "Payload should match")
@@ -17,5 +17,3 @@ const (
// Source file tracking for parquet deduplication
ExtendedAttrSources = "sources" // JSON-encoded list of source log files
)


@@ -118,17 +118,17 @@ func (m *MigrationManager) GetCurrentVersion() (int, error) {
if err != nil {
return 0, fmt.Errorf("failed to create migrations table: %w", err)
}

var version sql.NullInt64
err = m.db.QueryRow("SELECT MAX(version) FROM schema_migrations").Scan(&version)
if err != nil {
return 0, fmt.Errorf("failed to get current version: %w", err)
}

if !version.Valid {
return 0, nil // No migrations applied yet
}

return int(version.Int64), nil
}

@@ -138,29 +138,29 @@ func (m *MigrationManager) ApplyMigrations() error {
if err != nil {
return fmt.Errorf("failed to get current version: %w", err)
}

migrations := GetMigrations()

for _, migration := range migrations {
if migration.Version <= currentVersion {
continue // Already applied
}

fmt.Printf("Applying migration %d: %s\n", migration.Version, migration.Description)

// Begin transaction
tx, err := m.db.Begin()
if err != nil {
return fmt.Errorf("failed to begin transaction for migration %d: %w", migration.Version, err)
}

// Execute migration SQL
_, err = tx.Exec(migration.SQL)
if err != nil {
tx.Rollback()
return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err)
}

// Record migration as applied
_, err = tx.Exec(
"INSERT INTO schema_migrations (version, description, applied_at) VALUES (?, ?, ?)",
@@ -172,16 +172,16 @@ func (m *MigrationManager) ApplyMigrations() error {
tx.Rollback()
return fmt.Errorf("failed to record migration %d: %w", migration.Version, err)
}

// Commit transaction
err = tx.Commit()
if err != nil {
return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err)
}

fmt.Printf("Successfully applied migration %d\n", migration.Version)
}

return nil
}

@@ -203,7 +203,7 @@ func (m *MigrationManager) GetAppliedMigrations() ([]AppliedMigration, error) {
return nil, fmt.Errorf("failed to query applied migrations: %w", err)
}
defer rows.Close()

var migrations []AppliedMigration
for rows.Next() {
var migration AppliedMigration
@@ -213,7 +213,7 @@ func (m *MigrationManager) GetAppliedMigrations() ([]AppliedMigration, error) {
}
migrations = append(migrations, migration)
}

return migrations, nil
}

@@ -223,17 +223,17 @@ func (m *MigrationManager) ValidateSchema() error {
if err != nil {
return fmt.Errorf("failed to get current version: %w", err)
}

migrations := GetMigrations()
if len(migrations) == 0 {
return nil
}

latestVersion := migrations[len(migrations)-1].Version
if currentVersion < latestVersion {
return fmt.Errorf("schema is outdated: current version %d, latest version %d", currentVersion, latestVersion)
}

return nil
}

@@ -253,21 +253,21 @@ func getCurrentTimestamp() int64 {
func CreateDatabase(dbPath string) (*sql.DB, error) {
// TODO: Support different database types (PostgreSQL, MySQL, etc.)
// ASSUMPTION: Using SQLite for now, can be extended for other databases

db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}

// Configure SQLite for better performance
pragmas := []string{
"PRAGMA journal_mode=WAL", // Write-Ahead Logging for better concurrency
"PRAGMA synchronous=NORMAL", // Balance between safety and performance
"PRAGMA cache_size=10000", // Increase cache size
"PRAGMA foreign_keys=ON", // Enable foreign key constraints
"PRAGMA temp_store=MEMORY", // Store temporary tables in memory
}

for _, pragma := range pragmas {
_, err := db.Exec(pragma)
if err != nil {
@@ -275,7 +275,7 @@ func CreateDatabase(dbPath string) (*sql.DB, error) {
return nil, fmt.Errorf("failed to set pragma %s: %w", pragma, err)
}
}

// Apply migrations
migrationManager := NewMigrationManager(db)
err = migrationManager.ApplyMigrations()
@@ -283,7 +283,7 @@ func CreateDatabase(dbPath string) (*sql.DB, error) {
db.Close()
return nil, fmt.Errorf("failed to apply migrations: %w", err)
}

return db, nil
}

@@ -248,11 +248,11 @@ func TestValidateKeyColumns(t *testing.T) {

// Helper function to check if string contains substring
func contains(str, substr string) bool {
return len(str) >= len(substr) &&
(len(substr) == 0 || str[len(str)-len(substr):] == substr ||
str[:len(substr)] == substr ||
len(str) > len(substr) && (str[len(str)-len(substr)-1:len(str)-len(substr)] == " " || str[len(str)-len(substr)-1] == ' ') && str[len(str)-len(substr):] == substr ||
findInString(str, substr))
}

func findInString(str, substr string) bool {
@@ -1,8 +1,8 @@
package mq_agent_pb

import (
- "testing"
"google.golang.org/protobuf/proto"
+ "testing"
)

func TestPublishRecordResponseSerialization(t *testing.T) {
@@ -1,8 +1,8 @@
package schema_pb

import (
- "testing"
"google.golang.org/protobuf/proto"
+ "testing"
)

func TestOffsetTypeEnums(t *testing.T) {
@@ -34,8 +34,8 @@ func TestPartitionOffsetSerialization(t *testing.T) {
RangeStop: 31,
UnixTimeNs: 1234567890,
},
StartTsNs: 1234567890,
StartOffset: 42, // New field
}

// Test proto marshaling/unmarshaling
@@ -229,22 +229,22 @@ func TestToMetadata(t *testing.T) {
s3_constants.AmzUserMetaPrefix + "789": []byte("value3"),
},
expected: map[string]*string{
"_123key": stringPtr("value1"), // starts with digit -> prefix _
"_456_2d_test": stringPtr("value2"), // starts with digit AND has dash
"_789": stringPtr("value3"),
},
},
{
name: "uppercase and mixed case keys",
input: map[string][]byte{
s3_constants.AmzUserMetaPrefix + "My-Key": []byte("value1"),
s3_constants.AmzUserMetaPrefix + "UPPERCASE": []byte("value2"),
s3_constants.AmzUserMetaPrefix + "MiXeD-CaSe": []byte("value3"),
},
expected: map[string]*string{
"my_2d_key": stringPtr("value1"), // lowercase + dash -> _2d_
"uppercase": stringPtr("value2"),
"mixed_2d_case": stringPtr("value3"),
},
},
{
@@ -78,7 +78,7 @@ func TestSSES3EndToEndSmallFile(t *testing.T) {

// Step 3: Decrypt (simulates what happens during GET)
// This tests the IV retrieval path for inline files

// First, deserialize metadata from storage
retrievedKeyData := mockEntry.Extended[s3_constants.SeaweedFSSSES3Key]
retrievedKey, err := DeserializeSSES3Metadata(retrievedKeyData, keyManager)
@@ -71,7 +71,7 @@ func ValidateSSES3Key(sseKey *SSES3Key) error {
if sseKey == nil {
return fmt.Errorf("SSE-S3 key cannot be nil")
}

// Validate key bytes
if sseKey.Key == nil {
return fmt.Errorf("SSE-S3 key bytes cannot be nil")
@@ -79,22 +79,22 @@ func ValidateSSES3Key(sseKey *SSES3Key) error {
if len(sseKey.Key) != SSES3KeySize {
return fmt.Errorf("invalid SSE-S3 key size: expected %d bytes, got %d", SSES3KeySize, len(sseKey.Key))
}

// Validate algorithm
if sseKey.Algorithm != SSES3Algorithm {
return fmt.Errorf("invalid SSE-S3 algorithm: expected %q, got %q", SSES3Algorithm, sseKey.Algorithm)
}

// Validate key ID (should not be empty)
if sseKey.KeyID == "" {
return fmt.Errorf("SSE-S3 key ID cannot be empty")
}

// IV validation is optional during key creation - it will be set during encryption
// If IV is set, validate its length
if len(sseKey.IV) > 0 && len(sseKey.IV) != s3_constants.AESBlockSize {
return fmt.Errorf("invalid SSE-S3 IV length: expected %d bytes, got %d", s3_constants.AESBlockSize, len(sseKey.IV))
}

return nil
}
@@ -74,12 +74,12 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection
ev.Version = needle.Version(volumeInfo.Version)
ev.datFileSize = volumeInfo.DatFileSize
ev.ExpireAtSec = volumeInfo.ExpireAtSec

// Initialize EC context from .vif if present; fallback to defaults
if volumeInfo.EcShardConfig != nil {
ds := int(volumeInfo.EcShardConfig.DataShards)
ps := int(volumeInfo.EcShardConfig.ParityShards)

// Validate shard counts to prevent zero or invalid values
if ds <= 0 || ps <= 0 || ds+ps > MaxShardCount {
glog.Warningf("Invalid EC config in VolumeInfo for volume %d (data=%d, parity=%d), using defaults", vid, ds, ps)
@@ -15,10 +15,11 @@ import (
// are lost in the gap between flushed disk data and in-memory buffer.
//
// OBSERVED BEHAVIOR FROM LOGS:
+ //
// Request offset: 1764
// Disk contains: 1000-1763 (764 messages)
// Memory buffer starts at: 1800
// Gap: 1764-1799 (36 messages) ← MISSING!
//
// This test verifies:
// 1. All messages sent to buffer are accounted for
@@ -27,46 +28,46 @@ import (
func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
var flushedMessages []*filer_pb.LogEntry
var flushMu sync.Mutex

flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
t.Logf("FLUSH: minOffset=%d maxOffset=%d size=%d bytes", minOffset, maxOffset, len(buf))

// Parse and store flushed messages
flushMu.Lock()
defer flushMu.Unlock()

// Parse buffer to extract messages
parsedCount := 0
for pos := 0; pos+4 < len(buf); {
if pos+4 > len(buf) {
break
}

size := uint32(buf[pos])<<24 | uint32(buf[pos+1])<<16 | uint32(buf[pos+2])<<8 | uint32(buf[pos+3])
if pos+4+int(size) > len(buf) {
break
}

entryData := buf[pos+4 : pos+4+int(size)]
logEntry := &filer_pb.LogEntry{}
if err := proto.Unmarshal(entryData, logEntry); err == nil {
flushedMessages = append(flushedMessages, logEntry)
parsedCount++
}

pos += 4 + int(size)
}

t.Logf(" Parsed %d messages from flush buffer", parsedCount)
}

logBuffer := NewLogBuffer("test", 100*time.Millisecond, flushFn, nil, nil)
defer logBuffer.ShutdownLogBuffer()

// Send 100 messages
messageCount := 100
t.Logf("Sending %d messages...", messageCount)

for i := 0; i < messageCount; i++ {
logBuffer.AddToBuffer(&mq_pb.DataMessage{
Key: []byte(fmt.Sprintf("key-%d", i)),
@@ -74,11 +75,11 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
TsNs: time.Now().UnixNano(),
})
}

// Force flush multiple times to simulate real workload
t.Logf("Forcing flush...")
logBuffer.ForceFlush()

// Add more messages after flush
for i := messageCount; i < messageCount+50; i++ {
logBuffer.AddToBuffer(&mq_pb.DataMessage{
@@ -87,18 +88,18 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
TsNs: time.Now().UnixNano(),
})
}

// Force another flush
logBuffer.ForceFlush()
time.Sleep(200 * time.Millisecond) // Wait for flush to complete

// Now check the buffer state
logBuffer.RLock()
bufferStartOffset := logBuffer.bufferStartOffset
currentOffset := logBuffer.offset
pos := logBuffer.pos
logBuffer.RUnlock()

flushMu.Lock()
flushedCount := len(flushedMessages)
var maxFlushedOffset int64 = -1
@@ -108,23 +109,23 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
maxFlushedOffset = flushedMessages[flushedCount-1].Offset
}
flushMu.Unlock()

t.Logf("\nBUFFER STATE AFTER FLUSH:")
t.Logf(" bufferStartOffset: %d", bufferStartOffset)
t.Logf(" currentOffset (HWM): %d", currentOffset)
t.Logf(" pos (bytes in buffer): %d", pos)
t.Logf(" Messages sent: %d (offsets 0-%d)", messageCount+50, messageCount+49)
t.Logf(" Messages flushed to disk: %d (offsets %d-%d)", flushedCount, minFlushedOffset, maxFlushedOffset)

// CRITICAL CHECK: Is there a gap between flushed data and memory buffer?
if flushedCount > 0 && maxFlushedOffset >= 0 {
gap := bufferStartOffset - (maxFlushedOffset + 1)

t.Logf("\nOFFSET CONTINUITY CHECK:")
t.Logf(" Last flushed offset: %d", maxFlushedOffset)
t.Logf(" Buffer starts at: %d", bufferStartOffset)
t.Logf(" Gap: %d offsets", gap)

if gap > 0 {
t.Errorf("❌ CRITICAL BUG REPRODUCED: OFFSET GAP DETECTED!")
t.Errorf(" Disk has offsets %d-%d", minFlushedOffset, maxFlushedOffset)
@@ -137,22 +138,22 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
} else {
t.Logf("✅ PASS: No gap detected - offsets are continuous")
}

// Check if we can read all expected offsets
t.Logf("\nREADABILITY CHECK:")
for testOffset := int64(0); testOffset < currentOffset; testOffset += 10 {
// Try to read from buffer
requestPosition := NewMessagePositionFromOffset(testOffset)
buf, _, err := logBuffer.ReadFromBuffer(requestPosition)

isReadable := (buf != nil && len(buf.Bytes()) > 0) || err == ResumeFromDiskError
status := "✅"
if !isReadable && err == nil {
status = "❌ NOT READABLE"
}

t.Logf(" Offset %d: %s (buf=%v, err=%v)", testOffset, status, buf != nil, err)

// If offset is in the gap, it should fail to read
if flushedCount > 0 && testOffset > maxFlushedOffset && testOffset < bufferStartOffset {
if isReadable {
@@ -163,19 +164,19 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
}
}
}

// Check that all sent messages are accounted for
expectedMessageCount := messageCount + 50
messagesInMemory := int(currentOffset - bufferStartOffset)
totalAccountedFor := flushedCount + messagesInMemory

t.Logf("\nMESSAGE ACCOUNTING:")
t.Logf(" Expected: %d messages", expectedMessageCount)
t.Logf(" Flushed to disk: %d", flushedCount)
t.Logf(" In memory buffer: %d (offset range %d-%d)", messagesInMemory, bufferStartOffset, currentOffset-1)
t.Logf(" Total accounted for: %d", totalAccountedFor)
t.Logf(" Missing: %d messages", expectedMessageCount-totalAccountedFor)

if totalAccountedFor < expectedMessageCount {
t.Errorf("❌ DATA LOSS CONFIRMED: %d messages are missing!", expectedMessageCount-totalAccountedFor)
} else {
@@ -188,23 +189,23 @@ func TestFlushOffsetGap_ReproduceDataLoss(t *testing.T) {
func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) {
var flushCount int
var flushMu sync.Mutex

flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
flushMu.Lock()
flushCount++
count := flushCount
flushMu.Unlock()

t.Logf("FLUSH #%d: minOffset=%d maxOffset=%d size=%d bytes", count, minOffset, maxOffset, len(buf))
}

logBuffer := NewLogBuffer("test", 100*time.Millisecond, flushFn, nil, nil)
defer logBuffer.ShutdownLogBuffer()

// Send messages in batches with flushes in between
for batch := 0; batch < 5; batch++ {
t.Logf("\nBatch %d:", batch)

// Send 20 messages
for i := 0; i < 20; i++ {
offset := int64(batch*20 + i)
@@ -214,28 +215,28 @@ func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) {
TsNs: time.Now().UnixNano(),
})
}

// Check state before flush
logBuffer.RLock()
beforeFlushOffset := logBuffer.offset
beforeFlushStart := logBuffer.bufferStartOffset
logBuffer.RUnlock()

// Force flush
logBuffer.ForceFlush()
time.Sleep(50 * time.Millisecond)

// Check state after flush
logBuffer.RLock()
afterFlushOffset := logBuffer.offset
afterFlushStart := logBuffer.bufferStartOffset
prevBufferCount := len(logBuffer.prevBuffers.buffers)

// Check prevBuffers state
t.Logf(" Before flush: offset=%d, bufferStartOffset=%d", beforeFlushOffset, beforeFlushStart)
t.Logf(" After flush: offset=%d, bufferStartOffset=%d, prevBuffers=%d",
afterFlushOffset, afterFlushStart, prevBufferCount)

// Check each prevBuffer
for i, prevBuf := range logBuffer.prevBuffers.buffers {
if prevBuf.size > 0 {
@@ -244,7 +245,7 @@ func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) {
}
}
logBuffer.RUnlock()

// CRITICAL: Check if bufferStartOffset advanced correctly
expectedNewStart := beforeFlushOffset
if afterFlushStart != expectedNewStart {
@@ -261,10 +262,10 @@ func TestFlushOffsetGap_CheckPrevBuffers(t *testing.T) {
func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) {
var allFlushedOffsets []int64
var flushMu sync.Mutex

flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
t.Logf("FLUSH: offsets %d-%d (%d bytes)", minOffset, maxOffset, len(buf))

flushMu.Lock()
// Record the offset range that was flushed
for offset := minOffset; offset <= maxOffset; offset++ {
@@ -272,13 +273,13 @@ func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) {
}
flushMu.Unlock()
}

logBuffer := NewLogBuffer("test", 50*time.Millisecond, flushFn, nil, nil)
defer logBuffer.ShutdownLogBuffer()

// Concurrently write messages and force flushes
var wg sync.WaitGroup

// Writer goroutine
wg.Add(1)
go func() {
@@ -294,7 +295,7 @@ func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) {
}
}
}()

// Flusher goroutine
wg.Add(1)
go func() {
@@ -304,31 +305,31 @@ func TestFlushOffsetGap_ConcurrentWriteAndFlush(t *testing.T) {
logBuffer.ForceFlush()
}
}()

wg.Wait()
time.Sleep(200 * time.Millisecond) // Wait for final flush

// Check final state
logBuffer.RLock()
finalOffset := logBuffer.offset
finalBufferStart := logBuffer.bufferStartOffset
logBuffer.RUnlock()

flushMu.Lock()
flushedCount := len(allFlushedOffsets)
flushMu.Unlock()

expectedCount := int(finalOffset)
inMemory := int(finalOffset - finalBufferStart)
totalAccountedFor := flushedCount + inMemory

t.Logf("\nFINAL STATE:")
t.Logf(" Total messages sent: %d (offsets 0-%d)", expectedCount, expectedCount-1)
t.Logf(" Flushed to disk: %d", flushedCount)
t.Logf(" In memory: %d (offsets %d-%d)", inMemory, finalBufferStart, finalOffset-1)
t.Logf(" Total accounted: %d", totalAccountedFor)
t.Logf(" Missing: %d", expectedCount-totalAccountedFor)

if totalAccountedFor < expectedCount {
t.Errorf("❌ DATA LOSS in concurrent scenario: %d messages missing!", expectedCount-totalAccountedFor)
}
@@ -344,7 +345,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
messages []*filer_pb.LogEntry
}
var flushMu sync.Mutex

flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
// Parse messages from buffer
messages := []*filer_pb.LogEntry{}
@@ -360,7 +361,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
}
pos += 4 + int(size)
}

flushMu.Lock()
flushedData = append(flushedData, struct {
minOffset int64
@@ -368,17 +369,17 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
messages []*filer_pb.LogEntry
}{minOffset, maxOffset, messages})
flushMu.Unlock()

t.Logf("FLUSH: minOffset=%d maxOffset=%d, parsed %d messages", minOffset, maxOffset, len(messages))
}

logBuffer := NewLogBuffer("test", time.Hour, flushFn, nil, nil)
defer logBuffer.ShutdownLogBuffer()

// Simulate broker behavior: assign Kafka offsets and add to buffer
// This is what PublishWithOffset() does
nextKafkaOffset := int64(0)

// Round 1: Add 50 messages with Kafka offsets 0-49
t.Logf("\n=== ROUND 1: Adding messages 0-49 ===")
for i := 0; i < 50; i++ {
@@ -391,7 +392,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
logBuffer.AddLogEntryToBuffer(logEntry)
nextKafkaOffset++
}

// Check buffer state before flush
logBuffer.RLock()
beforeFlushOffset := logBuffer.offset
@@ -399,11 +400,11 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
logBuffer.RUnlock()
t.Logf("Before flush: logBuffer.offset=%d, bufferStartOffset=%d, nextKafkaOffset=%d",
beforeFlushOffset, beforeFlushStart, nextKafkaOffset)

// Flush
logBuffer.ForceFlush()
time.Sleep(100 * time.Millisecond)

// Check buffer state after flush
logBuffer.RLock()
afterFlushOffset := logBuffer.offset
@@ -411,7 +412,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
logBuffer.RUnlock()
t.Logf("After flush: logBuffer.offset=%d, bufferStartOffset=%d",
afterFlushOffset, afterFlushStart)

// Round 2: Add another 50 messages with Kafka offsets 50-99
t.Logf("\n=== ROUND 2: Adding messages 50-99 ===")
for i := 0; i < 50; i++ {
@@ -424,20 +425,20 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
logBuffer.AddLogEntryToBuffer(logEntry)
nextKafkaOffset++
}

logBuffer.ForceFlush()
time.Sleep(100 * time.Millisecond)

// Verification: Check if all Kafka offsets are accounted for
flushMu.Lock()
t.Logf("\n=== VERIFICATION ===")
t.Logf("Expected Kafka offsets: 0-%d", nextKafkaOffset-1)

allOffsets := make(map[int64]bool)
for flushIdx, flush := range flushedData {
t.Logf("Flush #%d: minOffset=%d, maxOffset=%d, messages=%d",
flushIdx, flush.minOffset, flush.maxOffset, len(flush.messages))

for _, msg := range flush.messages {
if allOffsets[msg.Offset] {
t.Errorf(" ❌ DUPLICATE: Offset %d appears multiple times!", msg.Offset)
@@ -446,7 +447,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
}
}
flushMu.Unlock()

// Check for missing offsets
missingOffsets := []int64{}
for expectedOffset := int64(0); expectedOffset < nextKafkaOffset; expectedOffset++ {
@@ -454,7 +455,7 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
missingOffsets = append(missingOffsets, expectedOffset)
}
}

if len(missingOffsets) > 0 {
t.Errorf("\n❌ MISSING OFFSETS DETECTED: %d offsets missing", len(missingOffsets))
if len(missingOffsets) <= 20 {
@@ -466,18 +467,18 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
} else {
t.Logf("\n✅ SUCCESS: All %d Kafka offsets accounted for (0-%d)", nextKafkaOffset, nextKafkaOffset-1)
}

// Check buffer offset consistency
logBuffer.RLock()
finalOffset := logBuffer.offset
finalBufferStart := logBuffer.bufferStartOffset
logBuffer.RUnlock()

t.Logf("\nFinal buffer state:")
t.Logf(" logBuffer.offset: %d", finalOffset)
t.Logf(" bufferStartOffset: %d", finalBufferStart)
t.Logf(" Expected (nextKafkaOffset): %d", nextKafkaOffset)

if finalOffset != nextKafkaOffset {
t.Errorf("❌ logBuffer.offset mismatch: expected %d, got %d", nextKafkaOffset, finalOffset)
}
@@ -488,12 +489,12 @@ func TestFlushOffsetGap_ProductionScenario(t *testing.T) {
func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) {
var flushedOffsets []int64
var flushMu sync.Mutex

readFromDiskFn := func(startPosition MessagePosition, stopTsNs int64, eachLogEntryFn EachLogEntryFuncType) (MessagePosition, bool, error) {
// Simulate reading from disk - return flushed offsets
flushMu.Lock()
defer flushMu.Unlock()

for _, offset := range flushedOffsets {
if offset >= startPosition.Offset {
logEntry := &filer_pb.LogEntry{
@@ -510,12 +511,12 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) {
}
return startPosition, false, nil
}

flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
// Parse and store flushed offsets
flushMu.Lock()
defer flushMu.Unlock()

for pos := 0; pos+4 < len(buf); {
size := uint32(buf[pos])<<24 | uint32(buf[pos+1])<<16 | uint32(buf[pos+2])<<8 | uint32(buf[pos+3])
if pos+4+int(size) > len(buf) {
@@ -528,14 +529,14 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) {
}
pos += 4 + int(size)
}

t.Logf("FLUSH: Stored %d offsets to disk (minOffset=%d, maxOffset=%d)",
len(flushedOffsets), minOffset, maxOffset)
}

logBuffer := NewLogBuffer("test", time.Hour, flushFn, readFromDiskFn, nil)
defer logBuffer.ShutdownLogBuffer()

// Add 100 messages
t.Logf("Adding 100 messages...")
for i := int64(0); i < 100; i++ {
@@ -547,32 +548,32 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) {
}
logBuffer.AddLogEntryToBuffer(logEntry)
}

// Flush (moves data to disk)
t.Logf("Flushing...")
logBuffer.ForceFlush()
time.Sleep(100 * time.Millisecond)

// Now try to read all messages using ReadMessagesAtOffset
t.Logf("\nReading messages from offset 0...")
messages, nextOffset, hwm, endOfPartition, err := logBuffer.ReadMessagesAtOffset(0, 1000, 1024*1024)

t.Logf("Read result: messages=%d, nextOffset=%d, hwm=%d, endOfPartition=%v, err=%v",
len(messages), nextOffset, hwm, endOfPartition, err)

// Verify all offsets can be read
readOffsets := make(map[int64]bool)
for _, msg := range messages {
readOffsets[msg.Offset] = true
}

missingOffsets := []int64{}
for expectedOffset := int64(0); expectedOffset < 100; expectedOffset++ {
if !readOffsets[expectedOffset] {
missingOffsets = append(missingOffsets, expectedOffset)
}
}

if len(missingOffsets) > 0 {
t.Errorf("❌ MISSING OFFSETS after flush: %d offsets cannot be read", len(missingOffsets))
if len(missingOffsets) <= 20 {
@@ -590,29 +591,29 @@ func TestFlushOffsetGap_ConcurrentReadDuringFlush(t *testing.T) {
func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) {
	flushedRanges := []struct{ min, max int64 }{}
	var flushMu sync.Mutex

	flushFn := func(logBuffer *LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
		flushMu.Lock()
		flushedRanges = append(flushedRanges, struct{ min, max int64 }{minOffset, maxOffset})
		flushMu.Unlock()
		t.Logf("FLUSH: offsets %d-%d", minOffset, maxOffset)
	}

	logBuffer := NewLogBuffer("test", time.Hour, flushFn, nil, nil) // Long interval, manual flush only
	defer logBuffer.ShutdownLogBuffer()

	// Send messages, flush, check state - repeat
	for round := 0; round < 3; round++ {
		t.Logf("\n=== ROUND %d ===", round)

		// Check state before adding messages
		logBuffer.RLock()
		beforeOffset := logBuffer.offset
		beforeStart := logBuffer.bufferStartOffset
		logBuffer.RUnlock()

		t.Logf("Before adding: offset=%d, bufferStartOffset=%d", beforeOffset, beforeStart)

		// Add 10 messages
		for i := 0; i < 10; i++ {
			logBuffer.AddToBuffer(&mq_pb.DataMessage{
@@ -621,28 +622,28 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) {
				TsNs: time.Now().UnixNano(),
			})
		}

		// Check state after adding
		logBuffer.RLock()
		afterAddOffset := logBuffer.offset
		afterAddStart := logBuffer.bufferStartOffset
		logBuffer.RUnlock()

		t.Logf("After adding: offset=%d, bufferStartOffset=%d", afterAddOffset, afterAddStart)

		// Force flush
		t.Logf("Forcing flush...")
		logBuffer.ForceFlush()
		time.Sleep(100 * time.Millisecond)

		// Check state after flush
		logBuffer.RLock()
		afterFlushOffset := logBuffer.offset
		afterFlushStart := logBuffer.bufferStartOffset
		logBuffer.RUnlock()

		t.Logf("After flush: offset=%d, bufferStartOffset=%d", afterFlushOffset, afterFlushStart)

		// CRITICAL CHECK: bufferStartOffset should advance to where offset was before flush
		if afterFlushStart != afterAddOffset {
			t.Errorf("❌ FLUSH BUG: bufferStartOffset did NOT advance correctly!")
@@ -653,19 +654,19 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) {
			t.Logf("✅ bufferStartOffset correctly advanced to %d", afterFlushStart)
		}
	}

	// Final verification: check all offset ranges are continuous
	flushMu.Lock()
	t.Logf("\n=== FLUSHED RANGES ===")
	for i, r := range flushedRanges {
		t.Logf("Flush #%d: offsets %d-%d", i, r.min, r.max)

		// Check continuity with previous flush
		if i > 0 {
			prevMax := flushedRanges[i-1].max
			currentMin := r.min
			gap := currentMin - (prevMax + 1)

			if gap > 0 {
				t.Errorf("❌ GAP between flush #%d and #%d: %d offsets missing!", i-1, i, gap)
			} else if gap < 0 {
@@ -677,4 +678,3 @@ func TestFlushOffsetGap_ForceFlushAdvancesBuffer(t *testing.T) {
	}
	flushMu.Unlock()
}

@@ -24,7 +24,7 @@ type GrpcAdminClient struct {
	workerID   string
	dialOption grpc.DialOption

	cmds chan grpcCommand

	// Reconnection parameters
	maxReconnectAttempts int