* filer.sync: pipelined subscription with adaptive batching for faster catch-up

  The SubscribeMetadata pipeline was fully serial: reading a log entry from a
  volume server, unmarshaling, filtering, and calling stream.Send() all
  happened one at a time. stream.Send() blocked the entire pipeline until the
  client acknowledged each event, limiting throughput to ~80 events/sec
  regardless of the -concurrency setting.

  Four server-side optimizations that stack:

  1. Pipelined sender: decouple stream.Send() from the read loop via a
     buffered channel (1024 messages). A dedicated goroutine handles gRPC
     delivery while the reader continues processing the next events.
  2. Adaptive batching: when event timestamps are >2min behind wall clock
     (backlog catch-up), drain multiple events from the channel and pack them
     into a single stream.Send() using a new `repeated events` field on
     SubscribeMetadataResponse. When events are recent (real-time), send
     one-by-one for low latency. Old clients ignore the new field (backward
     compatible).
  3. Persisted log readahead: run the OrderedLogVisitor in a background
     goroutine so volume server I/O for the next log file overlaps with event
     processing and gRPC delivery.
  4. Event-driven aggregated subscription: replace time.Sleep(1127ms) polling
     in SubscribeMetadata with notification-driven wake-up using the
     MetaLogBuffer subscriber mechanism, reducing real-time latency from
     ~1127ms to sub-millisecond.

  Combined, these create a 3-stage pipeline:

    [Volume I/O → readahead buffer] → [Filter → send buffer] → [gRPC Send]

  Test results (simulated backlog with 50µs gRPC latency per Send):

    direct (old):       2100 events  2100 sends  168ms  12,512 events/sec
    pipelined+batched:  2100 events    14 sends   40ms  52,856 events/sec

  Speedup: 4.2x single-stream throughput

  Ref: #8771

* filer.sync: require client opt-in for batch event delivery

  Add ClientSupportsBatching field to SubscribeMetadataRequest. The server
  only packs events into the Events batch field when the client explicitly
  sets this flag to true. Old clients (Java SDK, third-party) that don't set
  the flag get one event per Send, preserving backward compatibility.

  All Go callers (FollowMetadata, MetaAggregator) set the flag to true since
  their recv loops already unpack batched events.

* filer.sync: clear batch Events field after Send to release references

  Prevents the envelope message from holding references to the rest of the
  batch after gRPC serialization, allowing the GC to collect them sooner.

* filer.sync: fix Send deadlock, add error propagation test, event-driven
  local subscribe

  - pipelinedSender.Send: add case <-s.done to unblock when the sender
    goroutine exits (fixes a deadlock when errCh was already consumed by a
    prior Send).
  - pipelinedSender.reportErr: remove the for-range drain on sendCh that
    could block indefinitely. Send() now detects exit via s.done instead.
  - SubscribeLocalMetadata: replace the remaining time.Sleep(1127ms) in the
    gap-detected-no-memory-data path with event-driven listenersCond.Wait(),
    consistent with the rest of the subscription paths.
  - Add TestPipelinedSenderErrorPropagation: verifies the error surfaces via
    Send and Close when the underlying stream fails.
  - Replace goto with labeled break in test simulatePipeline.

* filer.sync: check error returns in test code

  - direct_send: check slowStream.Send error return
  - pipelined_batched_send: check sender.Close error return
  - simulatePipeline: return error from sender.Close, propagate to callers

---------

Co-authored-by: Copilot <copilot@github.com>
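To make the server-side mechanics concrete, here is a minimal sketch of the pipelined sender with adaptive batching and the done-channel deadlock fix described above. It is an illustration, not the merged code: the package name, the newPipelinedSender constructor, the metadataStream interface, the 1024-event batch cap, and the error returned after shutdown are assumptions; only pipelinedSender, sendCh, errCh, s.done, the 2-minute catch-up threshold, the opt-in flag, and the Events field come from the commit messages.

package sketch

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// metadataStream abstracts the generated gRPC server stream (an assumption
// for this sketch, so it compiles without the generated interface).
type metadataStream interface {
	Send(*filer_pb.SubscribeMetadataResponse) error
}

type pipelinedSender struct {
	sendCh chan *filer_pb.SubscribeMetadataResponse
	errCh  chan error
	done   chan struct{}
}

// newPipelinedSender (hypothetical name) starts the dedicated send goroutine.
func newPipelinedSender(stream metadataStream, clientSupportsBatching bool) *pipelinedSender {
	s := &pipelinedSender{
		sendCh: make(chan *filer_pb.SubscribeMetadataResponse, 1024), // decouples the read loop from gRPC delivery
		errCh:  make(chan error, 1),
		done:   make(chan struct{}),
	}
	go func() {
		defer close(s.done) // lets blocked Send callers observe the exit
		for resp := range s.sendCh {
			// Adaptive batching: only when the client opted in and this event
			// is >2min behind wall clock (backlog catch-up). Recent events go
			// out one-by-one for low latency.
			if clientSupportsBatching && time.Now().UnixNano()-resp.TsNs > 2*time.Minute.Nanoseconds() {
			drain:
				for len(resp.Events) < 1024 { // batch cap is an assumption
					select {
					case extra := <-s.sendCh:
						resp.Events = append(resp.Events, extra)
					default:
						break drain // queue momentarily empty: ship what we have
					}
				}
			}
			err := stream.Send(resp)
			resp.Events = nil // clear after Send so the GC can reclaim the batch
			if err != nil {
				s.errCh <- err // capacity 1: never blocks, consumed by one Send
				return
			}
		}
	}()
	return s
}

// Send enqueues one event. The <-s.done case is the deadlock fix: if the
// sender goroutine already exited and a prior Send consumed errCh, callers
// still unblock instead of waiting forever on a full sendCh.
func (s *pipelinedSender) Send(resp *filer_pb.SubscribeMetadataResponse) error {
	select {
	case s.sendCh <- resp:
		return nil
	case err := <-s.errCh:
		return err
	case <-s.done:
		return fmt.Errorf("pipelined sender stopped")
	}
}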
package pb

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"google.golang.org/grpc"
)

type EventErrorType int

const (
	TrivialOnError EventErrorType = iota
	FatalOnError
	RetryForeverOnError
	DontLogError
)

// MetadataFollowOption is used to control the behavior of the metadata following
// process. Part of it is used as a cursor to resume the following process.
type MetadataFollowOption struct {
	ClientName             string
	ClientId               int32
	ClientEpoch            int32
	SelfSignature          int32
	PathPrefix             string
	AdditionalPathPrefixes []string
	DirectoriesToWatch     []string
	StartTsNs              int64
	StopTsNs               int64
	EventErrorType         EventErrorType
}

type ProcessMetadataFunc func(resp *filer_pb.SubscribeMetadataResponse) error

func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption, option *MetadataFollowOption, processEventFn ProcessMetadataFunc) error {

	err := WithFilerClient(true, option.SelfSignature, filerAddress, grpcDialOption, makeSubscribeMetadataFunc(option, processEventFn))
	if err != nil {
		return fmt.Errorf("subscribing filer meta change: %w", err)
	}
	return nil
}

func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient, option *MetadataFollowOption, processEventFn ProcessMetadataFunc) error {

	err := filerClient.WithFilerClient(true, makeSubscribeMetadataFunc(option, processEventFn))
	if err != nil {
		return fmt.Errorf("subscribing filer meta change: %w", err)
	}

	return nil
}

func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn ProcessMetadataFunc) func(client filer_pb.SeaweedFilerClient) error {
	return func(client filer_pb.SeaweedFilerClient) error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
			ClientName:             option.ClientName,
			PathPrefix:             option.PathPrefix,
			PathPrefixes:           option.AdditionalPathPrefixes,
			Directories:            option.DirectoriesToWatch,
			SinceNs:                option.StartTsNs,
			Signature:              option.SelfSignature,
			ClientId:               option.ClientId,
			ClientEpoch:            option.ClientEpoch,
			UntilNs:                option.StopTsNs,
			ClientSupportsBatching: true, // this Go client unpacks batched events in the recv loop below
		})
		if err != nil {
			return fmt.Errorf("subscribe: %w", err)
		}

		handleErr := func(resp *filer_pb.SubscribeMetadataResponse, err error) {
			switch option.EventErrorType {
			case TrivialOnError:
				glog.Errorf("process %v: %v", resp, err)
			case FatalOnError:
				glog.Fatalf("process %v: %v", resp, err)
			case RetryForeverOnError:
				util.RetryUntil("followMetaUpdates", func() error {
					return processEventFn(resp)
				}, func(err error) bool {
					glog.Errorf("process %v: %v", resp, err)
					return true
				})
			case DontLogError:
				// pass
			default:
				glog.Errorf("process %v: %v", resp, err)
			}
		}

		for {
			resp, listenErr := stream.Recv()
			if listenErr == io.EOF {
				return nil
			}
			if listenErr != nil {
				return listenErr
			}

			// Process the first event (always present in top-level fields)
			if err := processEventFn(resp); err != nil {
				handleErr(resp, err)
			}
			option.StartTsNs = resp.TsNs

			// Process any additional batched events
			for _, batchedEvent := range resp.Events {
				if err := processEventFn(batchedEvent); err != nil {
					handleErr(batchedEvent, err)
				}
				option.StartTsNs = batchedEvent.TsNs
			}
		}
	}
}

func AddOffsetFunc(processEventFn ProcessMetadataFunc, offsetInterval time.Duration, offsetFunc func(counter int64, offset int64) error) ProcessMetadataFunc {
	var counter int64
	var lastWriteTime = time.Now()
	return func(resp *filer_pb.SubscribeMetadataResponse) error {
		if err := processEventFn(resp); err != nil {
			return err
		}
		counter++
		if lastWriteTime.Add(offsetInterval).Before(time.Now()) {
			lastWriteTime = time.Now()
			if err := offsetFunc(counter, resp.TsNs); err != nil {
				return err
			}
			counter = 0
		}
		return nil
	}
}
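For completeness, a hedged usage sketch of the client-side API above: FollowMetadata wrapped with AddOffsetFunc for periodic offset checkpointing. The filer address, client name/IDs, path prefix, and the log-only offset persistence are placeholders, not values from the PR.

package main

import (
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	processFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
		glog.V(0).Infof("event at ts %d in %s", resp.TsNs, resp.Directory)
		return nil
	}

	// Checkpoint the resume position every 3 seconds; after a restart,
	// StartTsNs can be seeded from the last persisted offset so the
	// subscription resumes instead of replaying from the beginning.
	withOffset := pb.AddOffsetFunc(processFn, 3*time.Second, func(counter int64, offset int64) error {
		glog.V(0).Infof("processed %d events, checkpoint offset %d", counter, offset)
		return nil // persist the offset here (placeholder)
	})

	option := &pb.MetadataFollowOption{
		ClientName:     "example-follower", // placeholder
		ClientId:       1,
		ClientEpoch:    1,
		PathPrefix:     "/buckets/", // placeholder prefix
		StartTsNs:      0,           // 0 = from the earliest available log
		EventErrorType: pb.RetryForeverOnError,
	}

	// Blocks for the life of the subscription (or until the stream fails).
	if err := pb.FollowMetadata(pb.ServerAddress("localhost:8888"), // placeholder filer address
		grpc.WithTransportCredentials(insecure.NewCredentials()), option, withOffset); err != nil {
		glog.Fatalf("follow metadata: %v", err)
	}
}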