seaweedfs/weed/filer/filer_notify.go
Chris Lu d97660d0cd filer.sync: pipelined subscription with adaptive batching for faster catch-up (#8791)
* filer.sync: pipelined subscription with adaptive batching for faster catch-up

The SubscribeMetadata pipeline was fully serial: reading a log entry from a
volume server, unmarshaling, filtering, and calling stream.Send() all happened
one at a time. stream.Send() blocked the entire pipeline until the client
acknowledged each event, limiting throughput to ~80 events/sec regardless of
the -concurrency setting.

Four server-side optimizations that stack (items 1 and 2 are sketched in code
after the pipeline diagram below):

1. Pipelined sender: decouple stream.Send() from the read loop via a buffered
   channel (1024 messages). A dedicated goroutine handles gRPC delivery while
   the reader continues processing the next events.

2. Adaptive batching: when event timestamps are >2min behind wall clock
   (backlog catch-up), drain multiple events from the channel and pack them
   into a single stream.Send() using a new `repeated events` field on
   SubscribeMetadataResponse. When events are recent (real-time), send
   one-by-one for low latency. Old clients ignore the new field (backward
   compatible).

3. Persisted log readahead: run the OrderedLogVisitor in a background
   goroutine so volume server I/O for the next log file overlaps with event
   processing and gRPC delivery.

4. Event-driven aggregated subscription: replace time.Sleep(1127ms) polling
   in SubscribeMetadata with notification-driven wake-up using the
   MetaLogBuffer subscriber mechanism, reducing real-time latency from
   ~1127ms to sub-millisecond.

Combined, these create a 3-stage pipeline:
  [Volume I/O → readahead buffer] → [Filter → send buffer] → [gRPC Send]
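
For illustration, a minimal sketch of how the pipelined sender (item 1) and
adaptive batching (item 2) compose. Only the 1024-message buffer, the 2-minute
backlog threshold, and the repeated Events field come from this change; the
package, type, and channel names here are hypothetical stand-ins, not the
actual implementation:

package sketch // illustrative only; not the actual server code

import (
    "time"

    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

type pipelinedSender struct {
    stream filer_pb.SeaweedFiler_SubscribeMetadataServer
    sendCh chan *filer_pb.SubscribeMetadataResponse
    errCh  chan error
    done   chan struct{}
}

func newPipelinedSender(stream filer_pb.SeaweedFiler_SubscribeMetadataServer) *pipelinedSender {
    s := &pipelinedSender{
        stream: stream,
        sendCh: make(chan *filer_pb.SubscribeMetadataResponse, 1024), // decouples the read loop from gRPC delivery
        errCh:  make(chan error, 1),
        done:   make(chan struct{}),
    }
    go s.run() // dedicated goroutine owns stream.Send
    return s
}

func (s *pipelinedSender) run() {
    defer close(s.done)
    for event := range s.sendCh {
        // Adaptive batching: an event >2min behind wall clock means backlog
        // catch-up, so drain whatever is already queued into the envelope's
        // repeated Events field and deliver it in a single Send. Recent
        // events skip this and go out one-by-one for low latency.
        if time.Since(time.Unix(0, event.TsNs)) > 2*time.Minute {
        drain:
            for {
                select {
                case more, ok := <-s.sendCh:
                    if !ok {
                        break drain
                    }
                    event.Events = append(event.Events, more)
                default:
                    break drain // nothing else queued right now
                }
            }
        }
        if err := s.stream.Send(event); err != nil {
            s.errCh <- err
            return
        }
        event.Events = nil // release batched events so the GC can reclaim them
    }
}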

Test results (simulated backlog with 50µs gRPC latency per Send):
  direct (old):        2100 events  2100 sends  168ms   12,512 events/sec
  pipelined+batched:   2100 events    14 sends   40ms   52,856 events/sec
  Speedup: 4.2x single-stream throughput

Ref: #8771

* filer.sync: require client opt-in for batch event delivery

Add a ClientSupportsBatching field to SubscribeMetadataRequest. The server
only packs events into the Events batch field when the client explicitly
sets this flag to true. Old clients (Java SDK, third-party) that don't set
the flag get one event per Send, preserving backward compatibility.

All Go callers (FollowMetadata, MetaAggregator) set the flag to true
since their recv loops already unpack batched events.
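
A hedged sketch of such a subscriber loop (ClientSupportsBatching and the
Events field are from this change; ClientName and PathPrefix are existing
request fields, while followExample and process are made-up names):

package sketch // illustrative client-side sketch

import (
    "context"

    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

func followExample(ctx context.Context, client filer_pb.SeaweedFilerClient,
    process func(*filer_pb.SubscribeMetadataResponse) error) error {
    stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
        ClientName:             "filer.sync-example",
        PathPrefix:             "/buckets/",
        ClientSupportsBatching: true, // opt in: server may pack extra events into resp.Events
    })
    if err != nil {
        return err
    }
    for {
        resp, err := stream.Recv()
        if err != nil {
            return err // io.EOF or a stream error ends the loop
        }
        if err := process(resp); err != nil { // the envelope event itself
            return err
        }
        for _, ev := range resp.Events { // unpack batched events (empty unless batching kicked in)
            if err := process(ev); err != nil {
                return err
            }
        }
    }
}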

* filer.sync: clear batch Events field after Send to release references

Prevents the envelope message from holding references to the rest of the
batch after gRPC serialization, allowing the GC to collect them sooner.
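
As a sketch of the pattern (sendAndRelease is a hypothetical helper, assuming
the filer_pb import; only the Events field and the clear-after-Send behavior
come from this change):

// Hypothetical helper: once stream.Send returns, gRPC has already serialized
// the message, so the envelope must not keep pinning the batched events.
func sendAndRelease(stream filer_pb.SeaweedFiler_SubscribeMetadataServer,
    envelope *filer_pb.SubscribeMetadataResponse) error {
    err := stream.Send(envelope)
    envelope.Events = nil // drop references so the GC can reclaim the batch sooner
    return err
}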

* filer.sync: fix Send deadlock, add error propagation test, event-driven local subscribe

- pipelinedSender.Send: add a case on <-s.done to unblock when the sender
  goroutine exits (fixes a deadlock when errCh was already consumed by a
  prior Send); the fixed Send is sketched after this list.
- pipelinedSender.reportErr: remove the for-range drain on sendCh that could
  block indefinitely. Send() now detects exit via s.done instead.
- SubscribeLocalMetadata: replace the remaining time.Sleep(1127ms) in the
  gap-detected-no-memory-data path with an event-driven listenersCond.Wait(),
  consistent with the rest of the subscription paths (also sketched below).
- Add TestPipelinedSenderErrorPropagation: verifies error surfaces via
  Send and Close when the underlying stream fails.
- Replace goto with labeled break in test simulatePipeline.
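
Sketches of the two fixes, reusing the hypothetical pipelinedSender names from
above (only sendCh, errCh, s.done, and listenersCond.Wait come from this PR's
description; the condition-variable code is the generic pattern, not the
MetaLogBuffer internals):

import (
    "fmt"
    "sync"

    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// Fixed Send: the <-s.done case unblocks callers once the sender goroutine
// has exited, even if an earlier Send already consumed the error from errCh.
func (s *pipelinedSender) Send(event *filer_pb.SubscribeMetadataResponse) error {
    select {
    case s.sendCh <- event:
        return nil
    case err := <-s.errCh:
        return err
    case <-s.done:
        return fmt.Errorf("pipelined sender closed")
    }
}

// Event-driven wait replacing time.Sleep polling: the writer broadcasts on a
// condition variable after appending, and subscribers park until woken.
func waitForNewEvents(cond *sync.Cond, hasNewData func() bool) {
    cond.L.Lock()
    for !hasNewData() {
        cond.Wait() // releases the lock while parked; woken by Broadcast
    }
    cond.L.Unlock()
}

func notifyNewEvents(cond *sync.Cond) {
    cond.L.Lock()
    cond.Broadcast() // wake every parked subscriber
    cond.L.Unlock()
}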

* filer.sync: check error returns in test code

- direct_send: check slowStream.Send error return
- pipelined_batched_send: check sender.Close error return
- simulatePipeline: return error from sender.Close, propagate to callers

---------

Co-authored-by: Copilot <copilot@github.com>
2026-03-26 23:55:42 -07:00


package filer

import (
    "context"
    "errors"
    "fmt"
    "io"
    nethttp "net/http"
    "regexp"
    "strings"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/notification"
    "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/util"
    util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
    "github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
    "google.golang.org/protobuf/proto"
)

func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
    f.notifyUpdateEvent(ctx, oldEntry, newEntry, deleteChunks, isFromOtherCluster, signatures)
}

func (f *Filer) notifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) *filer_pb.SubscribeMetadataResponse {
    var fullpath string
    if oldEntry != nil {
        fullpath = string(oldEntry.FullPath)
    } else if newEntry != nil {
        fullpath = string(newEntry.FullPath)
    } else {
        return nil
    }

    // println("fullpath:", fullpath)

    if strings.HasPrefix(fullpath, SystemLogDir) {
        return nil
    }

    foundSelf := false
    for _, sig := range signatures {
        if sig == f.Signature {
            foundSelf = true
        }
    }
    if !foundSelf {
        signatures = append(signatures, f.Signature)
    }

    event := f.newMetadataEvent(oldEntry, newEntry, deleteChunks, isFromOtherCluster, signatures)
    eventNotification := event.EventNotification

    if notification.Queue != nil {
        glog.V(3).Infof("notifying entry update %v", fullpath)
        if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
            // log and drop the message
            glog.Error(err)
        }
    }

    f.logMetaEvent(ctx, event)

    if sink := metadataEventSinkFromContext(ctx); sink != nil {
        sink.Record(event)
    }

    // Trigger empty folder cleanup for local events.
    // Remote events are handled via MetaAggregator.onMetadataChangeEvent.
    f.triggerLocalEmptyFolderCleanup(oldEntry, newEntry)

    return event
}

func (f *Filer) newMetadataEvent(oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) *filer_pb.SubscribeMetadataResponse {
    if oldEntry == nil && newEntry == nil {
        return nil
    }
    var fullpath util.FullPath
    if oldEntry != nil {
        fullpath = oldEntry.FullPath
    }
    if fullpath == "" && newEntry != nil {
        fullpath = newEntry.FullPath
    }
    dir, _ := fullpath.DirAndName()

    newParentPath := ""
    if newEntry != nil {
        newParentPath, _ = newEntry.FullPath.DirAndName()
    }

    return &filer_pb.SubscribeMetadataResponse{
        Directory: dir,
        EventNotification: &filer_pb.EventNotification{
            OldEntry:           oldEntry.ToProtoEntry(),
            NewEntry:           newEntry.ToProtoEntry(),
            DeleteChunks:       deleteChunks,
            NewParentPath:      newParentPath,
            IsFromOtherCluster: isFromOtherCluster,
            Signatures:         signatures,
        },
        TsNs: time.Now().UnixNano(),
    }
}

func (f *Filer) logMetaEvent(ctx context.Context, event *filer_pb.SubscribeMetadataResponse) {
    data, err := proto.Marshal(event)
    if err != nil {
        glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
        return
    }
    if err := f.LocalMetaLogBuffer.AddDataToBuffer([]byte(event.Directory), data, event.TsNs); err != nil {
        glog.Errorf("failed to add data to log buffer for %s: %v", event.Directory, err)
    }
}

// triggerLocalEmptyFolderCleanup triggers empty folder cleanup for local events.
// This is needed because onMetadataChangeEvent is only called for remote peer events.
func (f *Filer) triggerLocalEmptyFolderCleanup(oldEntry, newEntry *Entry) {
    if f.EmptyFolderCleaner == nil || !f.EmptyFolderCleaner.IsEnabled() {
        return
    }
    eventTime := time.Now()

    // Handle delete events (oldEntry exists, newEntry is nil)
    if oldEntry != nil && newEntry == nil {
        dir, name := oldEntry.FullPath.DirAndName()
        f.EmptyFolderCleaner.OnDeleteEvent(dir, name, oldEntry.IsDirectory(), eventTime)
    }

    // Handle create events (oldEntry is nil, newEntry exists)
    if oldEntry == nil && newEntry != nil {
        dir, name := newEntry.FullPath.DirAndName()
        f.EmptyFolderCleaner.OnCreateEvent(dir, name, newEntry.IsDirectory())
    }

    // Handle rename/move events (both exist but paths differ)
    if oldEntry != nil && newEntry != nil {
        oldDir, oldName := oldEntry.FullPath.DirAndName()
        newDir, newName := newEntry.FullPath.DirAndName()
        if oldDir != newDir || oldName != newName {
            // Treat the old location as a delete ...
            f.EmptyFolderCleaner.OnDeleteEvent(oldDir, oldName, oldEntry.IsDirectory(), eventTime)
            // ... and the new location as a create
            f.EmptyFolderCleaner.OnCreateEvent(newDir, newName, newEntry.IsDirectory())
        }
    }
}

func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) {
    if len(buf) == 0 {
        return
    }
    startTime, stopTime = startTime.UTC(), stopTime.UTC()

    targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir,
        startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFilerId,
        // startTime.Second(), startTime.Nanosecond(),
    )

    for {
        if err := f.appendToFile(targetFile, buf); err != nil {
            glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
            time.Sleep(737 * time.Millisecond)
        } else {
            break
        }
    }
}

var (
    volumeNotFoundPattern = regexp.MustCompile(`volume \d+? not found`)
    chunkNotFoundPattern  = regexp.MustCompile(`(urls not found|File Not Found)`)
    httpNotFoundPattern   = regexp.MustCompile(`404 Not Found: not found`)
)

// isChunkNotFoundError checks if the error indicates that a volume or chunk
// has been deleted and is no longer available. These errors can be skipped
// when reading persisted log files since the data is unrecoverable.
func isChunkNotFoundError(err error) bool {
    if err == nil {
        return false
    }
    if errors.Is(err, util_http.ErrNotFound) || errors.Is(err, nethttp.ErrMissingFile) {
        return true
    }
    errMsg := err.Error()
    return volumeNotFoundPattern.MatchString(errMsg) ||
        chunkNotFoundPattern.MatchString(errMsg) ||
        httpNotFoundPattern.MatchString(errMsg)
}

func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastTsNs int64, isDone bool, err error) {
    visitor, visitErr := f.collectPersistedLogBuffer(startPosition, stopTsNs)
    if visitErr != nil {
        if visitErr == io.EOF {
            return
        }
        err = fmt.Errorf("reading from persisted logs: %w", visitErr)
        return
    }

    // Readahead: run the visitor in a background goroutine so volume server I/O
    // for the next log file overlaps with event processing and gRPC delivery.
    const readaheadSize = 1024
    type entryOrErr struct {
        entry *filer_pb.LogEntry
        err   error
    }
    ch := make(chan entryOrErr, readaheadSize)
    stopReadahead := make(chan struct{})
    go func() {
        defer close(ch)
        for {
            entry, readErr := visitor.GetNext()
            if readErr != nil {
                if readErr != io.EOF {
                    select {
                    case ch <- entryOrErr{err: fmt.Errorf("read next from persisted logs: %w", readErr)}:
                    case <-stopReadahead:
                    }
                }
                return
            }
            select {
            case ch <- entryOrErr{entry: entry}:
            case <-stopReadahead:
                return
            }
        }
    }()
    defer close(stopReadahead)

    for item := range ch {
        if item.err != nil {
            err = item.err
            return
        }
        var processErr error
        isDone, processErr = eachLogEntryFn(item.entry)
        if processErr != nil {
            err = fmt.Errorf("process persisted log entry: %w", processErr)
            return
        }
        lastTsNs = item.entry.TsNs
        if isDone {
            return
        }
    }

    return
}

func (f *Filer) readPersistedLogBufferPosition(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) {
    lastReadPosition = startPosition
    lastTsNs, isDone, err := f.ReadPersistedLogBuffer(startPosition, stopTsNs, eachLogEntryFn)
    if err != nil {
        return lastReadPosition, isDone, err
    }
    if lastTsNs != 0 {
        lastReadPosition = log_buffer.NewMessagePosition(lastTsNs, 1)
    }
    return lastReadPosition, isDone, nil
}