seaweedfs/weed/filer/meta_aggregator.go
Chris Lu d97660d0cd filer.sync: pipelined subscription with adaptive batching for faster catch-up (#8791)
* filer.sync: pipelined subscription with adaptive batching for faster catch-up

The SubscribeMetadata pipeline was fully serial: reading a log entry from a
volume server, unmarshaling, filtering, and calling stream.Send() all happened
one-at-a-time. stream.Send() blocked the entire pipeline until the client
acknowledged each event, limiting throughput to ~80 events/sec regardless of
the -concurrency setting.

Four server-side optimizations that stack:

1. Pipelined sender: decouple stream.Send() from the read loop via a buffered
   channel (1024 messages). A dedicated goroutine handles gRPC delivery while
   the reader continues processing the next events.

2. Adaptive batching: when event timestamps are >2min behind wall clock
   (backlog catch-up), drain multiple events from the channel and pack them
   into a single stream.Send() using a new `repeated events` field on
   SubscribeMetadataResponse. When events are recent (real-time), send
   one-by-one for low latency. Old clients ignore the new field (backward
   compatible).

3. Persisted log readahead: run the OrderedLogVisitor in a background
   goroutine so volume server I/O for the next log file overlaps with event
   processing and gRPC delivery.

4. Event-driven aggregated subscription: replace time.Sleep(1127ms) polling
   in SubscribeMetadata with notification-driven wake-up using the
   MetaLogBuffer subscriber mechanism, reducing real-time latency from
   ~1127ms to sub-millisecond.

Combined, these create a 3-stage pipeline (a sketch of the sender side follows the diagram):
  [Volume I/O → readahead buffer] → [Filter → send buffer] → [gRPC Send]
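
A minimal sketch of the sender side (stages 2 and 3), assuming a hypothetical
pipelinedSender type and an eventStream interface that exposes only the server
stream's Send method; the names, the 1024-message buffer, and the 2-minute
threshold mirror the description above, not the actual server code. Only
filer_pb.SubscribeMetadataResponse and its TsNs/Events fields are real:

// Hypothetical sketch; everything except filer_pb.SubscribeMetadataResponse
// and its TsNs/Events fields is illustrative.
package metasub

import (
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// eventStream is the part of the gRPC server stream the sender needs.
type eventStream interface {
	Send(*filer_pb.SubscribeMetadataResponse) error
}

type pipelinedSender struct {
	stream eventStream
	sendCh chan *filer_pb.SubscribeMetadataResponse // buffered so the reader never waits on gRPC
	done   chan struct{}                            // closed when the sender goroutine exits
	err    error                                    // first Send error, readable after done is closed
}

func newPipelinedSender(stream eventStream) *pipelinedSender {
	s := &pipelinedSender{
		stream: stream,
		sendCh: make(chan *filer_pb.SubscribeMetadataResponse, 1024),
		done:   make(chan struct{}),
	}
	go s.run()
	return s
}

// run is the dedicated sender goroutine: it owns all stream.Send calls.
// When the head event lags wall clock by more than 2 minutes (backlog
// catch-up), it drains whatever is already queued into the envelope's
// Events field so one Send delivers many events; otherwise it sends
// one-by-one for low real-time latency.
func (s *pipelinedSender) run() {
	defer close(s.done)
	for envelope := range s.sendCh {
		if time.Since(time.Unix(0, envelope.TsNs)) > 2*time.Minute {
		drain:
			for {
				select {
				case more, ok := <-s.sendCh:
					if !ok {
						break drain
					}
					envelope.Events = append(envelope.Events, more)
				default:
					break drain
				}
			}
		}
		if err := s.stream.Send(envelope); err != nil {
			s.err = err
			return
		}
	}
}

The later commits in this PR refine this shape: batching is gated on a client
opt-in flag, the Events slice is cleared after Send, and the producer-side
Send also watches done.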

Test results (simulated backlog with 50µs gRPC latency per Send):
  direct (old):        2100 events  2100 sends  168ms   12,512 events/sec
  pipelined+batched:   2100 events    14 sends   40ms   52,856 events/sec
  Speedup: 4.2x single-stream throughput

Ref: #8771

* filer.sync: require client opt-in for batch event delivery

Add ClientSupportsBatching field to SubscribeMetadataRequest. The server
only packs events into the Events batch field when the client explicitly
sets this flag to true. Old clients (Java SDK, third-party) that don't
set the flag get one-event-per-Send, preserving backward compatibility (see
the gating sketch below).

All Go callers (FollowMetadata, MetaAggregator) set the flag to true
since their recv loops already unpack batched events.
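
Under the same hypothetical sketch, the opt-in gate might look like this;
shouldBatch is illustrative, and only the ClientSupportsBatching request
field comes from this change:

// Continuing the sketch above (same package and imports): batch only when
// the client opted in and the stream is catching up on a backlog.
func shouldBatch(req *filer_pb.SubscribeMetadataRequest, envelope *filer_pb.SubscribeMetadataResponse) bool {
	return req.ClientSupportsBatching &&
		time.Since(time.Unix(0, envelope.TsNs)) > 2*time.Minute
}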

* filer.sync: clear batch Events field after Send to release references

Prevents the envelope message from holding references to the rest of the
batch after gRPC serialization, allowing the GC to collect them sooner.
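
In the sketch above, that corresponds to nil-ing the Events slice as soon as
the envelope has been handed to gRPC:

// Inside the sketch's run loop: once Send returns, the message has been
// serialized, so the envelope no longer needs to pin the whole batch.
if err := s.stream.Send(envelope); err != nil {
	s.err = err
	return
}
envelope.Events = nil // let the GC reclaim the batched events sooner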

* filer.sync: fix Send deadlock, add error propagation test, event-driven local subscribe

- pipelinedSender.Send: add a `case <-s.done` to unblock when the sender
  goroutine exits (fixes a deadlock when errCh was already consumed by a
  prior Send); see the sketch after this list.
- pipelinedSender.reportErr: remove for-range drain on sendCh that could
  block indefinitely. Send() now detects exit via s.done instead.
- SubscribeLocalMetadata: replace remaining time.Sleep(1127ms) in the
  gap-detected-no-memory-data path with event-driven listenersCond.Wait(),
  consistent with the rest of the subscription paths.
- Add TestPipelinedSenderErrorPropagation: verifies error surfaces via
  Send and Close when the underlying stream fails.
- Replace goto with labeled break in test simulatePipeline.
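
A sketch of the corrected producer-side Send, under the same hypothetical
pipelinedSender (for simplicity the sketch surfaces errors via an err field
set before done is closed, rather than the errCh described above; "errors"
is assumed imported):

// Continuing the sketch: Send also selects on s.done so it cannot block
// forever once the sender goroutine has exited (e.g. after a stream error).
func (s *pipelinedSender) Send(envelope *filer_pb.SubscribeMetadataResponse) error {
	select {
	case s.sendCh <- envelope:
		return nil
	case <-s.done:
		// The sender goroutine is gone; nobody will drain sendCh any more.
		if s.err != nil {
			return s.err
		}
		return errors.New("pipelined sender closed")
	}
}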

* filer.sync: check error returns in test code

- direct_send: check slowStream.Send error return
- pipelined_batched_send: check sender.Close error return
- simulatePipeline: return error from sender.Close, propagate to callers

---------

Co-authored-by: Copilot <copilot@github.com>
2026-03-26 23:55:42 -07:00


package filer

import (
	"context"
	"fmt"
	"io"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/proto"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
)
type MetaAggregator struct {
	filer          *Filer
	self           pb.ServerAddress
	isLeader       bool
	grpcDialOption grpc.DialOption
	MetaLogBuffer  *log_buffer.LogBuffer
	peerChans      map[pb.ServerAddress]chan struct{}
	peerChansLock  sync.Mutex

	// notifying clients
	ListenersLock  sync.Mutex
	ListenersWaits int64 // Atomic counter
	ListenersCond  *sync.Cond
}

// MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk.
// The old data comes from what each LocalMetadata persisted on disk.
func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
	t := &MetaAggregator{
		filer:          filer,
		self:           self,
		grpcDialOption: grpcDialOption,
		peerChans:      make(map[pb.ServerAddress]chan struct{}),
	}
	t.ListenersCond = sync.NewCond(&t.ListenersLock)
	t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, nil, func() {
		if atomic.LoadInt64(&t.ListenersWaits) > 0 {
			t.ListenersCond.Broadcast()
		}
	})
	return t
}
func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startFrom time.Time) {
	ma.peerChansLock.Lock()
	defer ma.peerChansLock.Unlock()

	address := pb.ServerAddress(update.Address)
	if update.IsAdd {
		// cancel previous subscription if any
		if prevChan, found := ma.peerChans[address]; found {
			close(prevChan)
		}
		stopChan := make(chan struct{})
		ma.peerChans[address] = stopChan
		go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom, stopChan)
	} else {
		if prevChan, found := ma.peerChans[address]; found {
			close(prevChan)
			delete(ma.peerChans, address)
		}
	}
}

func (ma *MetaAggregator) HasRemotePeers() bool {
	ma.peerChansLock.Lock()
	defer ma.peerChansLock.Unlock()

	for address := range ma.peerChans {
		if address != ma.self {
			return true
		}
	}
	return false
}
func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time, stopChan chan struct{}) {
	lastTsNs := startFrom.UnixNano()
	for {
		glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
		nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs)

		// check stopChan to see if we should stop
		select {
		case <-stopChan:
			glog.V(0).Infof("stop subscribing peer %s meta change", peer)
			return
		default:
		}

		if err != nil {
			errLvl := glog.Level(0)
			if strings.Contains(err.Error(), "duplicated local subscription detected") {
				errLvl = glog.Level(4)
			}
			glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err)
		}
		if lastTsNs < nextLastTsNs {
			lastTsNs = nextLastTsNs
		}
		time.Sleep(1733 * time.Millisecond)
	}
}
func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom int64) (int64, error) {

	/*
		Each filer reads the "filer.store.id", which is the store's signature when filer starts.

		When reading from other filers' local meta changes:
		* if the received change does not contain signature from self, apply the change to current filer store.

		Upon connecting to other filers, need to remember their signature and their offsets.
	*/

	var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse)
	lastPersistTime := time.Now()
	lastTsNs := startFrom

	peerSignature, err := ma.readFilerStoreSignature(peer)
	if err != nil {
		return lastTsNs, fmt.Errorf("connecting to peer filer %s: %v", peer, err)
	}

	// when filer store is not shared by multiple filers
	if peerSignature != f.Signature {
		if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil {
			lastTsNs = prevTsNs
			defer func(prevTsNs int64) {
				if lastTsNs != prevTsNs && lastTsNs != lastPersistTime.UnixNano() {
					if err := ma.updateOffset(f, peer, peerSignature, lastTsNs); err == nil {
						glog.V(0).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
					} else {
						glog.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
					}
				}
			}(prevTsNs)
		}

		glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
		var counter int64
		var synced bool
		maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
			if err := Replay(f.Store, event); err != nil {
				glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
				return
			}
			counter++
			if lastPersistTime.Add(time.Minute).Before(time.Now()) {
				if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
					if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
						glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
					} else if !synced {
						synced = true
						glog.V(0).Infof("synced with %s", peer)
					}
					lastPersistTime = time.Now()
					counter = 0
				} else {
					glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
				}
			}
		}
	}

	processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
		data, err := proto.Marshal(event)
		if err != nil {
			glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
			return err
		}
		dir := event.Directory
		// println("received meta change", dir, "size", len(data))
		if err := ma.MetaLogBuffer.AddDataToBuffer([]byte(dir), data, event.TsNs); err != nil {
			glog.Errorf("failed to add data to log buffer for %s: %v", dir, err)
			return err
		}
		if maybeReplicateMetadataChange != nil {
			maybeReplicateMetadataChange(event)
		}
		return nil
	}

	glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
	err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		atomic.AddInt32(&ma.filer.UniqueFilerEpoch, 1)
		stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
			ClientName:             "filer:" + string(self),
			PathPrefix:             "/",
			SinceNs:                lastTsNs,
			ClientId:               ma.filer.UniqueFilerId,
			ClientEpoch:            atomic.LoadInt32(&ma.filer.UniqueFilerEpoch),
			ClientSupportsBatching: true,
		})
		if err != nil {
			glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err)
			return fmt.Errorf("subscribe: %w", err)
		}

		processOne := func(event *filer_pb.SubscribeMetadataResponse) error {
			if err := processEventFn(event); err != nil {
				glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", event, err)
				return fmt.Errorf("process %v: %w", event, err)
			}
			f.onMetadataChangeEvent(event)
			lastTsNs = event.TsNs
			return nil
		}

		for {
			resp, listenErr := stream.Recv()
			if listenErr == io.EOF {
				return nil
			}
			if listenErr != nil {
				glog.V(0).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr)
				return listenErr
			}
			if err := processOne(resp); err != nil {
				return err
			}
			// Process any additional batched events
			for _, batchedEvent := range resp.Events {
				if err := processOne(batchedEvent); err != nil {
					return err
				}
			}
		}
	})
	return lastTsNs, err
}
func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) {
	err = pb.WithFilerClient(false, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
		if err != nil {
			return err
		}
		sig = resp.Signature
		return nil
	})
	return
}

const (
	MetaOffsetPrefix = "Meta"
)

func GetPeerMetaOffsetKey(peerSignature int32) []byte {
	key := []byte(MetaOffsetPrefix + "xxxx")
	util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
	return key
}

func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignature int32) (lastTsNs int64, err error) {
	key := GetPeerMetaOffsetKey(peerSignature)

	value, err := f.Store.KvGet(context.Background(), key)
	if err != nil {
		return 0, fmt.Errorf("readOffset %s : %v", peer, err)
	}

	lastTsNs = int64(util.BytesToUint64(value))

	glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)

	return
}

func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSignature int32, lastTsNs int64) (err error) {
	key := GetPeerMetaOffsetKey(peerSignature)

	value := make([]byte, 8)
	util.Uint64toBytes(value, uint64(lastTsNs))

	err = f.Store.KvPut(context.Background(), key, value)
	if err != nil {
		return fmt.Errorf("updateOffset %s : %v", peer, err)
	}

	glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)

	return
}