Admin UI: Add message queue to admin UI (#6958)

* add a menu item "Message Queue"
  * move the "brokers" link under it.
  * add "topics", "subscribers". Add pages for them.

* refactor

* show topic details

* admin display publisher and subscriber info

* remove publisher and subscribers from the topic row pull down

* collecting more stats from publishers and subscribers

* fix layout

* fix publisher name

* add local listeners for mq broker and agent

* render consumer group offsets

* remove subscribers from left menu

* topic with retention

* support editing topic retention

* show retention when listing topics

* create bucket

* Update s3_buckets_templ.go

* embed the static assets into the binary

fix https://github.com/seaweedfs/seaweedfs/issues/6964
This commit is contained in:
Chris Lu
2025-07-11 10:19:27 -07:00
committed by GitHub
parent a9e1f00673
commit 51543bbb87
44 changed files with 8296 additions and 1156 deletions

View File

@@ -62,6 +62,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
}
resp.BrokerPartitionAssignments = pub_balancer.AllocateTopicPartitions(b.PubBalancer.Brokers, request.PartitionCount)
resp.RecordType = request.RecordType
resp.Retention = request.Retention
// save the topic configuration on filer
if err := b.fca.SaveTopicConfToFiler(t, resp); err != nil {

View File

@@ -3,9 +3,13 @@ package broker
import (
"context"
"fmt"
"strings"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)
@@ -50,27 +54,259 @@ func (b *MessageQueueBroker) ListTopics(ctx context.Context, request *mq_pb.List
}
ret := &mq_pb.ListTopicsResponse{}
knownTopics := make(map[string]struct{})
for brokerStatsItem := range b.PubBalancer.Brokers.IterBuffered() {
_, brokerStats := brokerStatsItem.Key, brokerStatsItem.Val
for topicPartitionStatsItem := range brokerStats.TopicPartitionStats.IterBuffered() {
topicPartitionStat := topicPartitionStatsItem.Val
topic := &schema_pb.Topic{
Namespace: topicPartitionStat.TopicPartition.Namespace,
Name: topicPartitionStat.TopicPartition.Name,
// Scan the filer directory structure to find all topics
err = b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
// List all namespaces under /topics
stream, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
Directory: filer.TopicsDir,
Limit: 1000,
})
if err != nil {
glog.V(0).Infof("list namespaces in %s: %v", filer.TopicsDir, err)
return err
}
// Process each namespace
for {
resp, err := stream.Recv()
if err != nil {
if err.Error() == "EOF" {
break
}
return err
}
topicKey := fmt.Sprintf("%s/%s", topic.Namespace, topic.Name)
if _, found := knownTopics[topicKey]; found {
if !resp.Entry.IsDirectory {
continue
}
knownTopics[topicKey] = struct{}{}
ret.Topics = append(ret.Topics, topic)
namespaceName := resp.Entry.Name
namespacePath := fmt.Sprintf("%s/%s", filer.TopicsDir, namespaceName)
// List all topics in this namespace
topicStream, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{
Directory: namespacePath,
Limit: 1000,
})
if err != nil {
glog.V(0).Infof("list topics in namespace %s: %v", namespacePath, err)
continue
}
// Process each topic in the namespace
for {
topicResp, err := topicStream.Recv()
if err != nil {
if err.Error() == "EOF" {
break
}
glog.V(0).Infof("error reading topic stream in namespace %s: %v", namespaceName, err)
break
}
if !topicResp.Entry.IsDirectory {
continue
}
topicName := topicResp.Entry.Name
// Check if topic.conf exists
topicPath := fmt.Sprintf("%s/%s", namespacePath, topicName)
confResp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
Directory: topicPath,
Name: filer.TopicConfFile,
})
if err != nil {
glog.V(0).Infof("lookup topic.conf in %s: %v", topicPath, err)
continue
}
if confResp.Entry != nil {
// This is a valid topic
topic := &schema_pb.Topic{
Namespace: namespaceName,
Name: topicName,
}
ret.Topics = append(ret.Topics, topic)
}
}
}
return nil
})
if err != nil {
glog.V(0).Infof("list topics from filer: %v", err)
// Return empty response on error
return &mq_pb.ListTopicsResponse{}, nil
}
return ret, nil
}
// GetTopicConfiguration returns the complete configuration of a topic,
// including its record schema, partition assignments, retention, and
// creation/modification timestamps read from the filer.
// If this broker does not own the balancer lock, the request is proxied
// to the current lock owner.
func (b *MessageQueueBroker) GetTopicConfiguration(ctx context.Context, request *mq_pb.GetTopicConfigurationRequest) (resp *mq_pb.GetTopicConfigurationResponse, err error) {
	if !b.isLockOwner() {
		// Proxy to the balancer lock owner; resp/err are captured from the remote call.
		proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error {
			resp, err = client.GetTopicConfiguration(ctx, request)
			return nil
		})
		if proxyErr != nil {
			return nil, proxyErr
		}
		return resp, err
	}

	t := topic.FromPbTopic(request.Topic)
	var conf *mq_pb.ConfigureTopicResponse
	var createdAtNs, modifiedAtNs int64
	if conf, createdAtNs, modifiedAtNs, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
		glog.V(0).Infof("get topic configuration %s: %v", request.Topic, err)
		// Wrap with %w so callers can inspect the cause via errors.Is/errors.As.
		return nil, fmt.Errorf("failed to read topic configuration: %w", err)
	}

	// Ensure topic assignments are active before reporting them.
	if err = b.ensureTopicActiveAssignments(t, conf); err != nil {
		glog.V(0).Infof("ensure topic active assignments %s: %v", request.Topic, err)
		return nil, fmt.Errorf("failed to ensure topic assignments: %w", err)
	}

	// Build the response with the complete configuration including metadata.
	return &mq_pb.GetTopicConfigurationResponse{
		Topic:                      request.Topic,
		PartitionCount:             int32(len(conf.BrokerPartitionAssignments)),
		RecordType:                 conf.RecordType,
		BrokerPartitionAssignments: conf.BrokerPartitionAssignments,
		CreatedAtNs:                createdAtNs,
		LastUpdatedNs:              modifiedAtNs,
		Retention:                  conf.Retention,
	}, nil
}
// GetTopicPublishers returns the currently connected publishers for a topic.
// Only partitions whose leader is this broker are inspected locally; if this
// broker does not own the balancer lock, the request is proxied to the owner.
func (b *MessageQueueBroker) GetTopicPublishers(ctx context.Context, request *mq_pb.GetTopicPublishersRequest) (resp *mq_pb.GetTopicPublishersResponse, err error) {
	if !b.isLockOwner() {
		proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error {
			resp, err = client.GetTopicPublishers(ctx, request)
			return nil
		})
		if proxyErr != nil {
			return nil, proxyErr
		}
		return resp, err
	}

	t := topic.FromPbTopic(request.Topic)
	var publishers []*mq_pb.TopicPublisher

	// Get topic configuration to find partition assignments.
	var conf *mq_pb.ConfigureTopicResponse
	if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
		glog.V(0).Infof("get topic configuration for publishers %s: %v", request.Topic, err)
		// Wrap with %w so callers can inspect the cause via errors.Is/errors.As.
		return nil, fmt.Errorf("failed to read topic configuration: %w", err)
	}

	// Collect publishers from each partition whose leader is this broker.
	for _, assignment := range conf.BrokerPartitionAssignments {
		if assignment.LeaderBroker != b.option.BrokerAddress().String() {
			continue
		}
		partition := topic.FromPbPartition(assignment.Partition)
		localPartition := b.localTopicManager.GetLocalPartition(t, partition)
		if localPartition == nil {
			continue
		}
		// Snapshot each connected publisher's timestamps and offsets.
		localPartition.Publishers.ForEachPublisher(func(clientName string, publisher *topic.LocalPublisher) {
			connectTimeNs, lastSeenTimeNs := publisher.GetTimestamps()
			lastPublishedOffset, lastAckedOffset := publisher.GetOffsets()
			publishers = append(publishers, &mq_pb.TopicPublisher{
				PublisherName:       clientName,
				ClientId:            clientName, // For now, client name is used as client ID
				Partition:           assignment.Partition,
				ConnectTimeNs:       connectTimeNs,
				LastSeenTimeNs:      lastSeenTimeNs,
				Broker:              assignment.LeaderBroker,
				IsActive:            true,
				LastPublishedOffset: lastPublishedOffset,
				LastAckedOffset:     lastAckedOffset,
			})
		})
	}

	return &mq_pb.GetTopicPublishersResponse{
		Publishers: publishers,
	}, nil
}
// GetTopicSubscribers returns the currently connected subscribers for a topic.
// Only partitions whose leader is this broker are inspected locally; if this
// broker does not own the balancer lock, the request is proxied to the owner.
func (b *MessageQueueBroker) GetTopicSubscribers(ctx context.Context, request *mq_pb.GetTopicSubscribersRequest) (resp *mq_pb.GetTopicSubscribersResponse, err error) {
	if !b.isLockOwner() {
		proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error {
			resp, err = client.GetTopicSubscribers(ctx, request)
			return nil
		})
		if proxyErr != nil {
			return nil, proxyErr
		}
		return resp, err
	}

	t := topic.FromPbTopic(request.Topic)
	var subscribers []*mq_pb.TopicSubscriber

	// Get topic configuration to find partition assignments.
	var conf *mq_pb.ConfigureTopicResponse
	if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
		glog.V(0).Infof("get topic configuration for subscribers %s: %v", request.Topic, err)
		// Wrap with %w so callers can inspect the cause via errors.Is/errors.As.
		return nil, fmt.Errorf("failed to read topic configuration: %w", err)
	}

	// Collect subscribers from each partition whose leader is this broker.
	for _, assignment := range conf.BrokerPartitionAssignments {
		if assignment.LeaderBroker != b.option.BrokerAddress().String() {
			continue
		}
		partition := topic.FromPbPartition(assignment.Partition)
		localPartition := b.localTopicManager.GetLocalPartition(t, partition)
		if localPartition == nil {
			continue
		}
		localPartition.Subscribers.ForEachSubscriber(func(clientName string, subscriber *topic.LocalSubscriber) {
			// Parse the client name, typically formatted "consumerGroup/consumerID".
			// Fall back to the "default" group when no separator is present.
			consumerGroup, consumerID := "default", clientName
			if group, id, ok := strings.Cut(clientName, "/"); ok {
				consumerGroup, consumerID = group, id
			}

			connectTimeNs, lastSeenTimeNs := subscriber.GetTimestamps()
			lastReceivedOffset, lastAckedOffset := subscriber.GetOffsets()
			subscribers = append(subscribers, &mq_pb.TopicSubscriber{
				ConsumerGroup:      consumerGroup,
				ConsumerId:         consumerID,
				ClientId:           clientName, // Full client name as client ID
				Partition:          assignment.Partition,
				ConnectTimeNs:      connectTimeNs,
				LastSeenTimeNs:     lastSeenTimeNs,
				Broker:             assignment.LeaderBroker,
				IsActive:           true,
				CurrentOffset:      lastAckedOffset, // for compatibility
				LastReceivedOffset: lastReceivedOffset,
			})
		})
	}

	return &mq_pb.GetTopicSubscribersResponse{
		Subscribers: subscribers,
	}, nil
}
// isLockOwner reports whether this broker currently holds the balancer lock,
// i.e. whether it should answer coordination requests directly instead of
// proxying them to the lock owner.
func (b *MessageQueueBroker) isLockOwner() bool {
	return b.lockAsBalancer.LockOwner() == b.option.BrokerAddress().String()
}

View File

@@ -3,15 +3,16 @@ package broker
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"google.golang.org/grpc/peer"
"io"
"math/rand"
"net"
"sync/atomic"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"google.golang.org/grpc/peer"
)
// PUB
@@ -69,6 +70,11 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
var receivedSequence, acknowledgedSequence int64
var isClosed bool
// process each published messages
clientName := fmt.Sprintf("%v-%4d", findClientAddress(stream.Context()), rand.Intn(10000))
publisher := topic.NewLocalPublisher()
localTopicPartition.Publishers.AddPublisher(clientName, publisher)
// start sending ack to publisher
ackInterval := int64(1)
if initMessage.AckInterval > 0 {
@@ -90,6 +96,8 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
if err := stream.Send(response); err != nil {
glog.Errorf("Error sending response %v: %v", response, err)
}
// Update acknowledged offset for this publisher
publisher.UpdateAckedOffset(acknowledgedSequence)
// println("sent ack", acknowledgedSequence, "=>", initMessage.PublisherName)
lastAckTime = time.Now()
} else {
@@ -98,10 +106,6 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
}
}()
// process each published messages
clientName := fmt.Sprintf("%v-%4d/%s/%v", findClientAddress(stream.Context()), rand.Intn(10000), initMessage.Topic, initMessage.Partition)
localTopicPartition.Publishers.AddPublisher(clientName, topic.NewLocalPublisher())
defer func() {
// remove the publisher
localTopicPartition.Publishers.RemovePublisher(clientName)
@@ -143,6 +147,9 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
if err = localTopicPartition.Publish(dataMessage); err != nil {
return fmt.Errorf("topic %v partition %v publish error: %v", initMessage.Topic, initMessage.Partition, err)
}
// Update published offset and last seen time for this publisher
publisher.UpdatePublishedOffset(dataMessage.TsNs)
}
glog.V(0).Infof("topic %v partition %v publish stream from %s closed.", initMessage.Topic, initMessage.Partition, initMessage.PublisherName)

View File

@@ -4,6 +4,9 @@ import (
"context"
"errors"
"fmt"
"io"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
@@ -12,8 +15,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"io"
"time"
)
func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_SubscribeMessageServer) error {
@@ -40,7 +41,8 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs
return getOrGenErr
}
localTopicPartition.Subscribers.AddSubscriber(clientName, topic.NewLocalSubscriber())
subscriber := topic.NewLocalSubscriber()
localTopicPartition.Subscribers.AddSubscriber(clientName, subscriber)
glog.V(0).Infof("Subscriber %s connected on %v %v", clientName, t, partition)
isConnected := true
sleepIntervalCount := 0
@@ -115,7 +117,10 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs
continue
}
imt.AcknowledgeMessage(ack.GetAck().Key, ack.GetAck().Sequence)
currentLastOffset := imt.GetOldestAckedTimestamp()
// Update acknowledged offset and last seen time for this subscriber when it sends an ack
subscriber.UpdateAckedOffset(currentLastOffset)
// fmt.Printf("%+v recv (%s,%d), oldest %d\n", partition, string(ack.GetAck().Key), ack.GetAck().Sequence, currentLastOffset)
if subscribeFollowMeStream != nil && currentLastOffset > lastOffset {
if err := subscribeFollowMeStream.Send(&mq_pb.SubscribeFollowMeRequest{
@@ -211,6 +216,9 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs
return false, err
}
// Update received offset and last seen time for this subscriber
subscriber.UpdateReceivedOffset(logEntry.TsNs)
counter++
return false, nil
})