Accumulated changes for message queue (#6600)
* rename
* set agent address
* refactor
* add agent sub
* pub messages
* grpc new client
* can publish records via agent
* send init message with session id
* fmt
* check cancelled request while waiting
* use sessionId
* handle possible nil stream
* subscriber process messages
* separate debug port
* use atomic int64
* less logs
* minor
* skip io.EOF
* rename
* remove unused
* use saved offsets
* do not reuse session, since the session id is always new after restart; remove last active ts from SessionEntry
* simplify printing
* purge unused
* just proxy the subscription, skipping the session step
* adjust offset types
* subscribe offset type and possible value
* start after the known tsns
* avoid wrongly set startPosition
* move
* remove
* refactor
* typo
* fix
* fix changed path
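Several of the hunks below thread a caller-supplied `sub.ctx` through the subscriber client and add a non-blocking cancellation check before each wait or send ("check cancelled request while waiting"). A self-contained Go sketch of that pattern; all names, addresses, and timings here are illustrative, not from the diff:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// pollBrokers mimics the subscriber loops in this diff: before each
// attempt it does a non-blocking read of ctx.Done(), so a cancelled
// request stops the loop instead of riding out another iteration.
func pollBrokers(ctx context.Context, brokers []string) {
	for {
		for _, broker := range brokers {
			select {
			case <-ctx.Done():
				return // cancelled while waiting
			default:
			}
			fmt.Println("contacting", broker)
			time.Sleep(100 * time.Millisecond)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		pollBrokers(ctx, []string{"broker-1:17777", "broker-2:17777"})
		close(done)
	}()
	time.Sleep(300 * time.Millisecond)
	cancel() // observed at the loop's next check
	<-done
}
```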
@@ -1,7 +1,6 @@
 package sub_client
 
 import (
-	"context"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
@@ -12,10 +11,17 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
 	waitTime := 1 * time.Second
 	for {
 		for _, broker := range sub.bootstrapBrokers {
+
+			select {
+			case <-sub.ctx.Done():
+				return
+			default:
+			}
+
 			// lookup topic brokers
 			var brokerLeader string
 			err := pb.WithBrokerGrpcClient(false, broker, sub.SubscriberConfig.GrpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
-				resp, err := client.FindBrokerLeader(context.Background(), &mq_pb.FindBrokerLeaderRequest{})
+				resp, err := client.FindBrokerLeader(sub.ctx, &mq_pb.FindBrokerLeaderRequest{})
 				if err != nil {
 					return err
 				}
@@ -30,10 +36,8 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
 
 			// connect to the balancer
 			pb.WithBrokerGrpcClient(true, brokerLeader, sub.SubscriberConfig.GrpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
-				ctx, cancel := context.WithCancel(context.Background())
-				defer cancel()
 
-				stream, err := client.SubscriberToSubCoordinator(ctx)
+				stream, err := client.SubscriberToSubCoordinator(sub.ctx)
 				if err != nil {
 					glog.V(0).Infof("subscriber %s: %v", sub.ContentConfig.Topic, err)
 					return err
@@ -58,6 +62,13 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
 
 				go func() {
 					for reply := range sub.brokerPartitionAssignmentAckChan {
+
+						select {
+						case <-sub.ctx.Done():
+							return
+						default:
+						}
+
 						glog.V(0).Infof("subscriber instance %s ack %+v", sub.SubscriberConfig.ConsumerGroupInstanceId, reply)
 						if err := stream.Send(reply); err != nil {
 							glog.V(0).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err)
@@ -73,6 +84,13 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
 						glog.V(0).Infof("subscriber %s receive: %v", sub.ContentConfig.Topic, err)
 						return err
 					}
+
+					select {
+					case <-sub.ctx.Done():
+						return nil
+					default:
+					}
+
 					sub.brokerPartitionAssignmentChan <- resp
 					glog.V(0).Infof("Received assignment: %+v", resp)
 				}
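The ack-forwarding goroutine above applies the same non-blocking check while draining a channel. A runnable sketch of that shape, with stand-ins for the stream and ack types (`send` plays the role of `stream.Send`; the string channel is illustrative):

```go
package main

import (
	"context"
	"fmt"
)

// forwardAcks drains an ack channel and forwards each entry, checking
// ctx.Done() non-blockingly on every iteration so a cancelled
// subscriber stops sending promptly.
func forwardAcks(ctx context.Context, acks <-chan string, send func(string) error) {
	for reply := range acks {
		select {
		case <-ctx.Done():
			return
		default:
		}
		if err := send(reply); err != nil {
			fmt.Println("send failed:", err)
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	acks := make(chan string, 4)
	done := make(chan struct{})
	go func() {
		forwardAcks(ctx, acks, func(s string) error {
			fmt.Println("sent ack:", s)
			return nil
		})
		close(done)
	}()
	acks <- "partition-0 ack"
	acks <- "partition-1 ack"
	close(acks) // range exits once the channel drains
	<-done
}
```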
@@ -2,14 +2,13 @@ package sub_client
 
 import (
-	"context"
 	"errors"
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
 	"io"
 	"reflect"
 	"time"
 )
 
 type KeyedOffset struct {
@@ -35,8 +34,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 	if po == nil {
 		po = &schema_pb.PartitionOffset{
 			Partition: assigned.Partition,
-			StartTsNs: time.Now().UnixNano(),
-			StartType: schema_pb.PartitionOffsetStartType_EARLIEST_IN_MEMORY,
+			StartTsNs: sub.ContentConfig.OffsetTsNs,
 		}
 	}
 
@@ -47,6 +45,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 		ConsumerId:        sub.SubscriberConfig.ConsumerGroupInstanceId,
 		Topic:             sub.ContentConfig.Topic.ToPbTopic(),
 		PartitionOffset:   po,
+		OffsetType:        sub.ContentConfig.OffsetType,
 		Filter:            sub.ContentConfig.Filter,
 		FollowerBroker:    assigned.FollowerBroker,
 		SlidingWindowSize: slidingWindowSize,
@@ -65,6 +64,9 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 	go func() {
 		for {
 			select {
+			case <-sub.ctx.Done():
+				subscribeClient.CloseSend()
+				return
 			case <-stopCh:
 				subscribeClient.CloseSend()
 				return
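This hunk gives the send side two shutdown triggers: caller cancellation and the per-partition stop channel; either one closes the stream's send direction. A minimal sketch of that selection, with `closeSend` standing in for `subscribeClient.CloseSend`:

```go
package main

import (
	"context"
	"fmt"
)

// watchShutdown waits for whichever fires first, caller cancellation or
// the per-partition stop signal, then runs one CloseSend-style cleanup.
func watchShutdown(ctx context.Context, stopCh <-chan struct{}, closeSend func()) {
	select {
	case <-ctx.Done():
		closeSend()
	case <-stopCh:
		closeSend()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	stopCh := make(chan struct{})
	done := make(chan struct{})
	go func() {
		watchShutdown(ctx, stopCh, func() { fmt.Println("closing send side") })
		close(done)
	}()
	cancel() // closing stopCh instead would have the same effect
	<-done
}
```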
@@ -86,15 +88,27 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 	}()
 
 	for {
-		// glog.V(0).Infof("subscriber %s/%s/%s waiting for message", sub.ContentConfig.Namespace, sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup)
+		// glog.V(0).Infof("subscriber %s/%s waiting for message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup)
 		resp, err := subscribeClient.Recv()
 		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return nil
+			}
 			return fmt.Errorf("subscribe recv: %v", err)
 		}
 		if resp.Message == nil {
 			glog.V(0).Infof("subscriber %s/%s received nil message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup)
 			continue
 		}
+
+		select {
+		case <-sub.ctx.Done():
+			return nil
+		case <-stopCh:
+			return nil
+		default:
+		}
+
 		switch m := resp.Message.(type) {
 		case *mq_pb.SubscribeMessageResponse_Data:
 			if m.Data.Ctrl != nil {
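Per the "skip io.EOF" item in the commit message, a clean stream close now ends the receive loop with nil instead of surfacing as an error. The same treatment against an ordinary `io.Reader`, since gRPC's `Recv` reports end-of-stream with the same sentinel:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// recvAll mirrors the receive loop above: io.EOF means the peer closed
// the stream cleanly, so it ends the loop with nil rather than being
// wrapped and returned as a failure.
func recvAll(r io.Reader) error {
	buf := make([]byte, 8)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			fmt.Printf("received %q\n", buf[:n])
		}
		if err != nil {
			if errors.Is(err, io.EOF) {
				return nil // clean end of stream
			}
			return fmt.Errorf("recv: %v", err)
		}
	}
}

func main() {
	if err := recvAll(strings.NewReader("hello world")); err != nil {
		fmt.Println("error:", err)
	}
}
```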
@@ -102,7 +116,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 				continue
 			}
 			if len(m.Data.Key) == 0 {
-				fmt.Printf("empty key %+v, type %v\n", m, reflect.TypeOf(m))
+				// fmt.Printf("empty key %+v, type %v\n", m, reflect.TypeOf(m))
 				continue
 			}
 			onDataMessageFn(m)
@@ -72,12 +72,12 @@ func (sub *TopicSubscriber) startProcessors() {
 	executors := util.NewLimitedConcurrentExecutor(int(sub.SubscriberConfig.SlidingWindowSize))
 	onDataMessageFn := func(m *mq_pb.SubscribeMessageResponse_Data) {
 		executors.Execute(func() {
-			processErr := sub.OnEachMessageFunc(m.Data.Key, m.Data.Value)
-			if processErr == nil {
-				sub.PartitionOffsetChan <- KeyedOffset{
-					Key:    m.Data.Key,
-					Offset: m.Data.TsNs,
-				}
+			if sub.OnDataMessageFunc != nil {
+				sub.OnDataMessageFunc(m)
+			}
+			sub.PartitionOffsetChan <- KeyedOffset{
+				Key:    m.Data.Key,
+				Offset: m.Data.TsNs,
 			}
 		})
 	}
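`startProcessors` caps per-partition concurrency with `util.NewLimitedConcurrentExecutor` and, after this change, reports the consumed offset on `PartitionOffsetChan` once the optional `OnDataMessageFunc` hook has run. A buffered-channel semaphore is one way to sketch the executor's role; this stand-in is not the seaweedfs implementation:

```go
package main

import (
	"fmt"
	"sync"
)

// limitedExecutor caps in-flight jobs with a buffered-channel semaphore,
// illustrating the "at most SlidingWindowSize concurrent handlers per
// partition" behavior relied on above.
type limitedExecutor struct {
	sem chan struct{}
	wg  sync.WaitGroup
}

func newLimitedExecutor(limit int) *limitedExecutor {
	return &limitedExecutor{sem: make(chan struct{}, limit)}
}

func (e *limitedExecutor) Execute(job func()) {
	e.sem <- struct{}{} // blocks while `limit` jobs are already running
	e.wg.Add(1)
	go func() {
		defer e.wg.Done()
		defer func() { <-e.sem }()
		job()
	}()
}

func (e *limitedExecutor) Wait() { e.wg.Wait() }

func main() {
	e := newLimitedExecutor(2)
	for i := 0; i < 5; i++ {
		i := i
		e.Execute(func() { fmt.Println("processing message", i) })
	}
	e.Wait()
}
```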
@@ -1,6 +1,7 @@
 package sub_client
 
 import (
+	"context"
 	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
 	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
@@ -17,50 +18,50 @@ type SubscriberConfiguration struct {
 	SlidingWindowSize int32 // how many messages to process concurrently per partition
 }
 
 func (s *SubscriberConfiguration) String() string {
 	return "ClientId: " + s.ClientId + ", ConsumerGroup: " + s.ConsumerGroup + ", ConsumerGroupInstanceId: " + s.ConsumerGroupInstanceId
 }
 
 type ContentConfiguration struct {
 	Topic            topic.Topic
 	Filter           string
 	PartitionOffsets []*schema_pb.PartitionOffset
+	OffsetType       schema_pb.OffsetType
+	OffsetTsNs       int64
 }
 
 type OnDataMessageFn func(m *mq_pb.SubscribeMessageResponse_Data)
 type OnEachMessageFunc func(key, value []byte) (err error)
 type OnCompletionFunc func()
 
 type TopicSubscriber struct {
+	ctx                              context.Context
 	SubscriberConfig                 *SubscriberConfiguration
 	ContentConfig                    *ContentConfiguration
 	brokerPartitionAssignmentChan    chan *mq_pb.SubscriberToSubCoordinatorResponse
 	brokerPartitionAssignmentAckChan chan *mq_pb.SubscriberToSubCoordinatorRequest
-	OnDataMessageFnnc                OnDataMessageFn
 	OnEachMessageFunc                OnEachMessageFunc
+	OnDataMessageFunc                OnDataMessageFn
 	OnCompletionFunc                 OnCompletionFunc
 	bootstrapBrokers                 []string
 	waitForMoreMessage               bool
 	activeProcessors                 map[topic.Partition]*ProcessorState
 	activeProcessorsLock             sync.Mutex
 	PartitionOffsetChan              chan KeyedOffset
 }
 
-func NewTopicSubscriber(bootstrapBrokers []string, subscriber *SubscriberConfiguration, content *ContentConfiguration, partitionOffsetChan chan KeyedOffset) *TopicSubscriber {
+func NewTopicSubscriber(ctx context.Context, bootstrapBrokers []string, subscriber *SubscriberConfiguration, content *ContentConfiguration, partitionOffsetChan chan KeyedOffset) *TopicSubscriber {
 	return &TopicSubscriber{
+		ctx:                              ctx,
 		SubscriberConfig:                 subscriber,
 		ContentConfig:                    content,
 		brokerPartitionAssignmentChan:    make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024),
 		brokerPartitionAssignmentAckChan: make(chan *mq_pb.SubscriberToSubCoordinatorRequest, 1024),
 		bootstrapBrokers:                 bootstrapBrokers,
 		waitForMoreMessage:               true,
 		activeProcessors:                 make(map[topic.Partition]*ProcessorState),
 		PartitionOffsetChan:              partitionOffsetChan,
 	}
 }
 
 func (sub *TopicSubscriber) SetEachMessageFunc(onEachMessageFn OnEachMessageFunc) {
 	sub.OnEachMessageFunc = onEachMessageFn
 }
 
 func (sub *TopicSubscriber) SetOnDataMessageFn(fn OnDataMessageFn) {
-	sub.OnDataMessageFnnc = fn
+	sub.OnDataMessageFunc = fn
 }
 
 func (sub *TopicSubscriber) SetCompletionFunc(onCompletionFn OnCompletionFunc) {
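Putting the pieces together, a hedged sketch of how a caller might wire up the subscriber after this change: the context now comes from the caller, and the offset type and timestamp ride in `ContentConfiguration`. The broker address, topic literal, and field values are placeholders, and the `topic.Topic` struct-literal form is an assumption, not shown in this diff:

```go
package main

import (
	"context"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/client/sub_client"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The caller owns the context now; cancelling it unwinds the
	// coordinator connection and the per-partition receive loops.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var offsetType schema_pb.OffsetType // choose a value per your resume policy
	offsetChan := make(chan sub_client.KeyedOffset, 1024)

	sub := sub_client.NewTopicSubscriber(
		ctx,
		[]string{"localhost:17777"}, // bootstrap broker, placeholder address
		&sub_client.SubscriberConfiguration{
			ClientId:                "example-client",
			ConsumerGroup:           "example-group",
			ConsumerGroupInstanceId: "example-instance-1",
			SlidingWindowSize:       16,
			GrpcDialOption:          grpc.WithTransportCredentials(insecure.NewCredentials()),
		},
		&sub_client.ContentConfiguration{
			Topic:      topic.Topic{Namespace: "test", Name: "events"}, // assumed literal form
			OffsetType: offsetType,
			OffsetTsNs: time.Now().UnixNano(),
		},
		offsetChan,
	)

	sub.SetEachMessageFunc(func(key, value []byte) error {
		return nil // nil lets the offset for this key advance
	})
	// Starting the subscription loop itself is outside this diff.
}
```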