Admin UI: Add message queue to admin UI (#6958)

* add a menu item "Message Queue"

* add a menu item "Message Queue"
  * move the "brokers" link under it.
  * add "topics", "subscribers". Add pages for them.

* refactor

* show topic details

* admin display publisher and subscriber info

* remove publisher and subscribers from the topic row pull down

* collecting more stats from publishers and subscribers

* fix layout

* fix publisher name

* add local listeners for mq broker and agent

* render consumer group offsets

* remove subscribers from left menu

* topic with retention

* support editing topic retention

* show retention when listing topics

* create bucket

* Update s3_buckets_templ.go

* embed the static assets into the binary

fix https://github.com/seaweedfs/seaweedfs/issues/6964
This commit is contained in:
Chris Lu
2025-07-11 10:19:27 -07:00
committed by GitHub
parent a9e1f00673
commit 51543bbb87
44 changed files with 8296 additions and 1156 deletions

View File

@@ -12,15 +12,16 @@ import (
)
type AdminData struct {
Username string `json:"username"`
TotalVolumes int `json:"total_volumes"`
TotalFiles int64 `json:"total_files"`
TotalSize int64 `json:"total_size"`
MasterNodes []MasterNode `json:"master_nodes"`
VolumeServers []VolumeServer `json:"volume_servers"`
FilerNodes []FilerNode `json:"filer_nodes"`
DataCenters []DataCenter `json:"datacenters"`
LastUpdated time.Time `json:"last_updated"`
Username string `json:"username"`
TotalVolumes int `json:"total_volumes"`
TotalFiles int64 `json:"total_files"`
TotalSize int64 `json:"total_size"`
MasterNodes []MasterNode `json:"master_nodes"`
VolumeServers []VolumeServer `json:"volume_servers"`
FilerNodes []FilerNode `json:"filer_nodes"`
MessageBrokers []MessageBrokerNode `json:"message_brokers"`
DataCenters []DataCenter `json:"datacenters"`
LastUpdated time.Time `json:"last_updated"`
}
// Object Store Users management structures
@@ -76,6 +77,13 @@ type FilerNode struct {
LastUpdated time.Time `json:"last_updated"`
}
// MessageBrokerNode describes one message queue broker as reported by the
// master's cluster-node listing, for display in the admin UI.
type MessageBrokerNode struct {
	Address     string    `json:"address"`      // broker host:port as registered with the master
	DataCenter  string    `json:"datacenter"`   // data center the broker registered under
	Rack        string    `json:"rack"`         // rack within the data center
	LastUpdated time.Time `json:"last_updated"` // when this status entry was collected (not broker start time)
}
// GetAdminData retrieves admin data as a struct (for reuse by both JSON and HTML handlers)
func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
if username == "" {
@@ -95,17 +103,21 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
// Get filer nodes status
filerNodes := s.getFilerNodesStatus()
// Get message broker nodes status
messageBrokers := s.getMessageBrokerNodesStatus()
// Prepare admin data
adminData := AdminData{
Username: username,
TotalVolumes: topology.TotalVolumes,
TotalFiles: topology.TotalFiles,
TotalSize: topology.TotalSize,
MasterNodes: masterNodes,
VolumeServers: topology.VolumeServers,
FilerNodes: filerNodes,
DataCenters: topology.DataCenters,
LastUpdated: topology.UpdatedAt,
Username: username,
TotalVolumes: topology.TotalVolumes,
TotalFiles: topology.TotalFiles,
TotalSize: topology.TotalSize,
MasterNodes: masterNodes,
VolumeServers: topology.VolumeServers,
FilerNodes: filerNodes,
MessageBrokers: messageBrokers,
DataCenters: topology.DataCenters,
LastUpdated: topology.UpdatedAt,
}
return adminData, nil
@@ -200,3 +212,38 @@ func (s *AdminServer) getFilerNodesStatus() []FilerNode {
return filerNodes
}
// getMessageBrokerNodesStatus queries the master via ListClusterNodes for all
// registered message broker nodes and converts them to MessageBrokerNode
// entries for the admin dashboard. On any master error it logs the failure
// and returns an empty (non-nil) slice so callers can render without error
// handling.
func (s *AdminServer) getMessageBrokerNodesStatus() []MessageBrokerNode {
	var brokers []MessageBrokerNode

	// Ask the master for every node registered with the broker client type.
	err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, listErr := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.BrokerType,
		})
		if listErr != nil {
			return listErr
		}

		// Translate each cluster node into the UI-facing representation.
		for _, node := range resp.ClusterNodes {
			brokers = append(brokers, MessageBrokerNode{
				Address:     node.Address,
				DataCenter:  node.DataCenter,
				Rack:        node.Rack,
				LastUpdated: time.Now(),
			})
		}
		return nil
	})
	if err != nil {
		glog.Errorf("Failed to get message broker nodes from master %s: %v", s.masterAddress, err)
		// Return empty list if we can't get broker info from master
		return []MessageBrokerNode{}
	}

	return brokers
}

View File

@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/gin-gonic/gin"
@@ -16,6 +17,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"github.com/seaweedfs/seaweedfs/weed/security"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
@@ -44,6 +47,9 @@ type AdminServer struct {
// Maintenance system
maintenanceManager *maintenance.MaintenanceManager
// Topic retention purger
topicRetentionPurger *TopicRetentionPurger
// Worker gRPC server
workerGrpcServer *WorkerGrpcServer
}
@@ -61,6 +67,9 @@ func NewAdminServer(masterAddress string, templateFS http.FileSystem, dataDir st
configPersistence: NewConfigPersistence(dataDir),
}
// Initialize topic retention purger
server.topicRetentionPurger = NewTopicRetentionPurger(server)
// Initialize credential manager with defaults
credentialManager, err := credential.NewCredentialManagerWithDefaults("")
if err != nil {
@@ -257,14 +266,41 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
quotaEnabled = false
}
// Get versioning and object lock information from extended attributes
versioningEnabled := false
objectLockEnabled := false
objectLockMode := ""
var objectLockDuration int32 = 0
if resp.Entry.Extended != nil {
if versioningBytes, exists := resp.Entry.Extended["s3.versioning"]; exists {
versioningEnabled = string(versioningBytes) == "Enabled"
}
if objectLockBytes, exists := resp.Entry.Extended["s3.objectlock"]; exists {
objectLockEnabled = string(objectLockBytes) == "Enabled"
}
if objectLockModeBytes, exists := resp.Entry.Extended["s3.objectlock.mode"]; exists {
objectLockMode = string(objectLockModeBytes)
}
if objectLockDurationBytes, exists := resp.Entry.Extended["s3.objectlock.duration"]; exists {
if duration, err := strconv.ParseInt(string(objectLockDurationBytes), 10, 32); err == nil {
objectLockDuration = int32(duration)
}
}
}
bucket := S3Bucket{
Name: bucketName,
CreatedAt: time.Unix(resp.Entry.Attributes.Crtime, 0),
Size: size,
ObjectCount: objectCount,
LastModified: time.Unix(resp.Entry.Attributes.Mtime, 0),
Quota: quota,
QuotaEnabled: quotaEnabled,
Name: bucketName,
CreatedAt: time.Unix(resp.Entry.Attributes.Crtime, 0),
Size: size,
ObjectCount: objectCount,
LastModified: time.Unix(resp.Entry.Attributes.Mtime, 0),
Quota: quota,
QuotaEnabled: quotaEnabled,
VersioningEnabled: versioningEnabled,
ObjectLockEnabled: objectLockEnabled,
ObjectLockMode: objectLockMode,
ObjectLockDuration: objectLockDuration,
}
buckets = append(buckets, bucket)
}
@@ -305,6 +341,45 @@ func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error
details.Bucket.CreatedAt = time.Unix(bucketResp.Entry.Attributes.Crtime, 0)
details.Bucket.LastModified = time.Unix(bucketResp.Entry.Attributes.Mtime, 0)
// Get quota information from entry
quota := bucketResp.Entry.Quota
quotaEnabled := quota > 0
if quota < 0 {
// Negative quota means disabled
quota = -quota
quotaEnabled = false
}
details.Bucket.Quota = quota
details.Bucket.QuotaEnabled = quotaEnabled
// Get versioning and object lock information from extended attributes
versioningEnabled := false
objectLockEnabled := false
objectLockMode := ""
var objectLockDuration int32 = 0
if bucketResp.Entry.Extended != nil {
if versioningBytes, exists := bucketResp.Entry.Extended["s3.versioning"]; exists {
versioningEnabled = string(versioningBytes) == "Enabled"
}
if objectLockBytes, exists := bucketResp.Entry.Extended["s3.objectlock"]; exists {
objectLockEnabled = string(objectLockBytes) == "Enabled"
}
if objectLockModeBytes, exists := bucketResp.Entry.Extended["s3.objectlock.mode"]; exists {
objectLockMode = string(objectLockModeBytes)
}
if objectLockDurationBytes, exists := bucketResp.Entry.Extended["s3.objectlock.duration"]; exists {
if duration, err := strconv.ParseInt(string(objectLockDurationBytes), 10, 32); err == nil {
objectLockDuration = int32(duration)
}
}
}
details.Bucket.VersioningEnabled = versioningEnabled
details.Bucket.ObjectLockEnabled = objectLockEnabled
details.Bucket.ObjectLockMode = objectLockMode
details.Bucket.ObjectLockDuration = objectLockDuration
// List objects in bucket (recursively)
return s.listBucketObjects(client, bucketPath, "", details)
})
@@ -598,6 +673,48 @@ func (s *AdminServer) GetClusterFilers() (*ClusterFilersData, error) {
}, nil
}
// GetClusterBrokers retrieves cluster message brokers data.
//
// It lists all broker-type nodes registered with the master and returns them
// together with a total count and the collection timestamp. Unlike
// getMessageBrokerNodesStatus, a master failure here is surfaced to the
// caller as an error.
func (s *AdminServer) GetClusterBrokers() (*ClusterBrokersData, error) {
	var brokers []MessageBrokerInfo

	// Collect broker registrations from the master's cluster-node listing.
	if err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, listErr := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.BrokerType,
		})
		if listErr != nil {
			return listErr
		}

		for _, node := range resp.ClusterNodes {
			brokers = append(brokers, MessageBrokerInfo{
				Address:    node.Address,
				DataCenter: node.DataCenter,
				Rack:       node.Rack,
				Version:    node.Version,
				CreatedAt:  time.Unix(0, node.CreatedAtNs),
			})
		}
		return nil
	}); err != nil {
		return nil, fmt.Errorf("failed to get broker nodes from master: %v", err)
	}

	return &ClusterBrokersData{
		Brokers:      brokers,
		TotalBrokers: len(brokers),
		LastUpdated:  time.Now(),
	}, nil
}
// GetAllFilers method moved to client_management.go
// GetVolumeDetails method moved to volume_management.go
@@ -1054,6 +1171,17 @@ func (as *AdminServer) triggerMaintenanceScan() error {
return as.maintenanceManager.TriggerScan()
}
// TriggerTopicRetentionPurgeAPI triggers topic retention purge via HTTP API.
// It responds 200 with a confirmation message on success, or 500 with the
// purge error text on failure.
func (as *AdminServer) TriggerTopicRetentionPurgeAPI(c *gin.Context) {
	if err := as.TriggerTopicRetentionPurge(); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"message": "Topic retention purge triggered successfully"})
}
// GetConfigInfo returns information about the admin configuration
func (as *AdminServer) GetConfigInfo(c *gin.Context) {
configInfo := as.configPersistence.GetConfigInfo()
@@ -1184,6 +1312,157 @@ func (s *AdminServer) StopMaintenanceManager() {
}
}
// TriggerTopicRetentionPurge triggers topic data purging based on retention
// policies. It fails with an error when the server was started without a
// retention purger.
func (s *AdminServer) TriggerTopicRetentionPurge() error {
	purger := s.topicRetentionPurger
	if purger == nil {
		return fmt.Errorf("topic retention purger not initialized")
	}
	glog.V(0).Infof("Triggering topic retention purge")
	return purger.PurgeExpiredTopicData()
}
// GetTopicRetentionPurger returns the topic retention purger owned by this
// admin server. The result may be nil if the purger was never initialized.
func (s *AdminServer) GetTopicRetentionPurger() *TopicRetentionPurger {
	return s.topicRetentionPurger
}
// CreateTopicWithRetention creates a new topic with optional retention
// configuration.
//
// namespace/name identify the topic and partitionCount sets the initial
// number of partitions. retentionEnabled/retentionSeconds control the
// retention policy; when retention is disabled the stored seconds value is
// forced to 0 so the broker holds a canonical "no retention" configuration.
//
// The request is sent to the current broker leader with a 10s deadline; an
// error is returned if no leader can be found or ConfigureTopic fails.
func (s *AdminServer) CreateTopicWithRetention(namespace, name string, partitionCount int32, retentionEnabled bool, retentionSeconds int64) error {
	// Find broker leader to create the topic
	brokerLeader, err := s.findBrokerLeader()
	if err != nil {
		return fmt.Errorf("failed to find broker leader: %v", err)
	}

	// Build the retention configuration. Both branches of the previous
	// if/else produced a fully-populated struct, so a single literal with a
	// normalized seconds value is equivalent and clearer.
	seconds := retentionSeconds
	if !retentionEnabled {
		seconds = 0 // the requested duration is meaningless when retention is off
	}
	retention := &mq_pb.TopicRetention{
		Enabled:          retentionEnabled,
		RetentionSeconds: seconds,
	}

	// Create the topic via broker
	err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		_, err := client.ConfigureTopic(ctx, &mq_pb.ConfigureTopicRequest{
			Topic: &schema_pb.Topic{
				Namespace: namespace,
				Name:      name,
			},
			PartitionCount: partitionCount,
			Retention:      retention,
		})
		return err
	})
	if err != nil {
		return fmt.Errorf("failed to create topic: %v", err)
	}

	glog.V(0).Infof("Created topic %s.%s with %d partitions (retention: enabled=%v, seconds=%d)",
		namespace, name, partitionCount, retentionEnabled, retentionSeconds)
	return nil
}
// UpdateTopicRetention updates the retention configuration for an existing
// topic while preserving its partition count and record schema.
//
// namespace/name identify the topic; enabled toggles retention, and
// retentionSeconds is the retention window (stored as 0 when disabled).
// The update is sent to the first broker the master reports — not
// necessarily the broker leader used elsewhere in this file (see
// CreateTopicWithRetention, which goes through findBrokerLeader).
func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool, retentionSeconds int64) error {
	// Get broker information from master
	var brokerAddress string
	err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.BrokerType,
		})
		if err != nil {
			return err
		}

		// Find the first available broker
		for _, node := range resp.ClusterNodes {
			brokerAddress = node.Address
			break
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to get broker nodes from master: %v", err)
	}

	if brokerAddress == "" {
		return fmt.Errorf("no active brokers found")
	}

	// Create gRPC connection
	// NOTE(review): this dials node.Address directly — confirm this is the
	// broker's gRPC address and not a service address that needs translation.
	conn, err := grpc.Dial(brokerAddress, s.grpcDialOption)
	if err != nil {
		return fmt.Errorf("failed to connect to broker: %v", err)
	}
	defer conn.Close()

	client := mq_pb.NewSeaweedMessagingClient(conn)

	// First, get the current topic configuration to preserve existing settings.
	// One 10s deadline covers both the read and the write below.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	currentConfig, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{
		Topic: &schema_pb.Topic{
			Namespace: namespace,
			Name:      name,
		},
	})
	if err != nil {
		return fmt.Errorf("failed to get current topic configuration: %v", err)
	}

	// Create the topic configuration request, preserving all existing settings
	configRequest := &mq_pb.ConfigureTopicRequest{
		Topic: &schema_pb.Topic{
			Namespace: namespace,
			Name:      name,
		},
		// Preserve existing partition count - this is critical!
		PartitionCount: currentConfig.PartitionCount,
		// Preserve existing record type if it exists
		RecordType: currentConfig.RecordType,
	}

	// Update only the retention configuration
	if enabled {
		configRequest.Retention = &mq_pb.TopicRetention{
			RetentionSeconds: retentionSeconds,
			Enabled:          true,
		}
	} else {
		// Set retention to disabled (seconds are normalized to 0)
		configRequest.Retention = &mq_pb.TopicRetention{
			RetentionSeconds: 0,
			Enabled:          false,
		}
	}

	// Send the configuration request with preserved settings
	_, err = client.ConfigureTopic(ctx, configRequest)
	if err != nil {
		return fmt.Errorf("failed to update topic retention: %v", err)
	}

	glog.V(0).Infof("Updated topic %s.%s retention (enabled: %v, seconds: %d) while preserving %d partitions",
		namespace, name, enabled, retentionSeconds, currentConfig.PartitionCount)
	return nil
}
// Shutdown gracefully shuts down the admin server
func (s *AdminServer) Shutdown() {
glog.V(1).Infof("Shutting down admin server...")

View File

@@ -22,11 +22,15 @@ type S3BucketsData struct {
}
type CreateBucketRequest struct {
Name string `json:"name" binding:"required"`
Region string `json:"region"`
QuotaSize int64 `json:"quota_size"` // Quota size in bytes
QuotaUnit string `json:"quota_unit"` // Unit: MB, GB, TB
QuotaEnabled bool `json:"quota_enabled"` // Whether quota is enabled
Name string `json:"name" binding:"required"`
Region string `json:"region"`
QuotaSize int64 `json:"quota_size"` // Quota size in bytes
QuotaUnit string `json:"quota_unit"` // Unit: MB, GB, TB
QuotaEnabled bool `json:"quota_enabled"` // Whether quota is enabled
VersioningEnabled bool `json:"versioning_enabled"` // Whether versioning is enabled
ObjectLockEnabled bool `json:"object_lock_enabled"` // Whether object lock is enabled
ObjectLockMode string `json:"object_lock_mode"` // Object lock mode: "GOVERNANCE" or "COMPLIANCE"
ObjectLockDuration int32 `json:"object_lock_duration"` // Default retention duration in days
}
// S3 Bucket Management Handlers
@@ -89,21 +93,43 @@ func (s *AdminServer) CreateBucket(c *gin.Context) {
return
}
// Validate object lock settings
if req.ObjectLockEnabled {
// Object lock requires versioning to be enabled
req.VersioningEnabled = true
// Validate object lock mode
if req.ObjectLockMode != "GOVERNANCE" && req.ObjectLockMode != "COMPLIANCE" {
c.JSON(http.StatusBadRequest, gin.H{"error": "Object lock mode must be either GOVERNANCE or COMPLIANCE"})
return
}
// Validate retention duration
if req.ObjectLockDuration <= 0 {
c.JSON(http.StatusBadRequest, gin.H{"error": "Object lock duration must be greater than 0 days"})
return
}
}
// Convert quota to bytes
quotaBytes := convertQuotaToBytes(req.QuotaSize, req.QuotaUnit)
err := s.CreateS3BucketWithQuota(req.Name, quotaBytes, req.QuotaEnabled)
err := s.CreateS3BucketWithObjectLock(req.Name, quotaBytes, req.QuotaEnabled, req.VersioningEnabled, req.ObjectLockEnabled, req.ObjectLockMode, req.ObjectLockDuration)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create bucket: " + err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{
"message": "Bucket created successfully",
"bucket": req.Name,
"quota_size": req.QuotaSize,
"quota_unit": req.QuotaUnit,
"quota_enabled": req.QuotaEnabled,
"message": "Bucket created successfully",
"bucket": req.Name,
"quota_size": req.QuotaSize,
"quota_unit": req.QuotaUnit,
"quota_enabled": req.QuotaEnabled,
"versioning_enabled": req.VersioningEnabled,
"object_lock_enabled": req.ObjectLockEnabled,
"object_lock_mode": req.ObjectLockMode,
"object_lock_duration": req.ObjectLockDuration,
})
}
@@ -258,6 +284,11 @@ func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaE
// CreateS3BucketWithQuota creates a new S3 bucket with quota settings.
// It is a convenience wrapper around CreateS3BucketWithObjectLock with
// versioning and object lock disabled.
func (s *AdminServer) CreateS3BucketWithQuota(bucketName string, quotaBytes int64, quotaEnabled bool) error {
	return s.CreateS3BucketWithObjectLock(bucketName, quotaBytes, quotaEnabled, false, false, "", 0)
}
// CreateS3BucketWithObjectLock creates a new S3 bucket with quota, versioning, and object lock settings
func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes int64, quotaEnabled, versioningEnabled, objectLockEnabled bool, objectLockMode string, objectLockDuration int32) error {
return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// First ensure /buckets directory exists
_, err := client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{
@@ -299,21 +330,41 @@ func (s *AdminServer) CreateS3BucketWithQuota(bucketName string, quotaBytes int6
quota = 0
}
// Prepare bucket attributes with versioning and object lock metadata
attributes := &filer_pb.FuseAttributes{
FileMode: uint32(0755 | os.ModeDir), // Directory mode
Uid: filer_pb.OS_UID,
Gid: filer_pb.OS_GID,
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
TtlSec: 0,
}
// Create extended attributes map for versioning and object lock
extended := make(map[string][]byte)
if versioningEnabled {
extended["s3.versioning"] = []byte("Enabled")
} else {
extended["s3.versioning"] = []byte("Suspended")
}
if objectLockEnabled {
extended["s3.objectlock"] = []byte("Enabled")
extended["s3.objectlock.mode"] = []byte(objectLockMode)
extended["s3.objectlock.duration"] = []byte(fmt.Sprintf("%d", objectLockDuration))
} else {
extended["s3.objectlock"] = []byte("Disabled")
}
// Create bucket directory under /buckets
_, err = client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{
Directory: "/buckets",
Entry: &filer_pb.Entry{
Name: bucketName,
IsDirectory: true,
Attributes: &filer_pb.FuseAttributes{
FileMode: uint32(0755 | os.ModeDir), // Directory mode
Uid: filer_pb.OS_UID,
Gid: filer_pb.OS_GID,
Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(),
TtlSec: 0,
},
Quota: quota,
Attributes: attributes,
Extended: extended,
Quota: quota,
},
})
if err != nil {

View File

@@ -0,0 +1,615 @@
package dash
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/cluster"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
// GetTopics retrieves message queue topics data for the admin UI.
//
// It locates the broker leader, lists all topics, and enriches each topic
// with its partition count (LookupTopicBrokers) and retention settings
// (GetTopicConfiguration). Any failure — no leader, connection error —
// degrades to an empty topic list with a nil error so the UI still renders.
func (s *AdminServer) GetTopics() (*TopicsData, error) {
	var topics []TopicInfo

	// Find broker leader and get topics
	brokerLeader, err := s.findBrokerLeader()
	if err != nil {
		// If no broker leader found, return empty data
		return &TopicsData{
			Topics:      topics,
			TotalTopics: len(topics),
			LastUpdated: time.Now(),
		}, nil
	}

	// Connect to broker leader and list topics
	err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error {
		// One 5s deadline is shared by the list call and every per-topic
		// lookup below.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		resp, err := client.ListTopics(ctx, &mq_pb.ListTopicsRequest{})
		if err != nil {
			return err
		}

		// Convert protobuf topics to TopicInfo - only include available data
		for _, pbTopic := range resp.Topics {
			topicInfo := TopicInfo{
				Name:       fmt.Sprintf("%s.%s", pbTopic.Namespace, pbTopic.Name),
				Partitions: 0, // Will be populated by LookupTopicBrokers call
				Retention: TopicRetentionInfo{
					Enabled:      false,
					DisplayValue: 0,
					DisplayUnit:  "days",
				},
			}

			// Get topic configuration to get partition count; per-topic
			// lookup errors are deliberately ignored and leave the defaults
			// above in place.
			lookupResp, err := client.LookupTopicBrokers(ctx, &mq_pb.LookupTopicBrokersRequest{
				Topic: pbTopic,
			})
			if err == nil {
				topicInfo.Partitions = len(lookupResp.BrokerPartitionAssignments)
			}

			// Get topic configuration for retention information (also best-effort)
			configResp, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{
				Topic: pbTopic,
			})
			if err == nil && configResp.Retention != nil {
				topicInfo.Retention = convertTopicRetention(configResp.Retention)
			}

			topics = append(topics, topicInfo)
		}
		return nil
	})
	if err != nil {
		// If connection fails, return empty data
		return &TopicsData{
			Topics:      topics,
			TotalTopics: len(topics),
			LastUpdated: time.Now(),
		}, nil
	}

	return &TopicsData{
		Topics:      topics,
		TotalTopics: len(topics),
		LastUpdated: time.Now(),
		// Don't include TotalMessages and TotalSize as they're not available
	}, nil
}
// GetSubscribers retrieves message queue subscribers data.
//
// SeaweedMQ currently exposes no API that enumerates subscribers cluster-wide,
// so this always returns an empty subscriber set; the broker connection is a
// placeholder for future statistics collection. A missing broker leader or a
// connection failure likewise yields empty data with a nil error.
func (s *AdminServer) GetSubscribers() (*SubscribersData, error) {
	var subscribers []SubscriberInfo

	// Without a broker leader there is nothing to query; report an empty set.
	brokerLeader, err := s.findBrokerLeader()
	if err != nil {
		return &SubscribersData{
			Subscribers:       subscribers,
			TotalSubscribers:  len(subscribers),
			ActiveSubscribers: 0,
			LastUpdated:       time.Now(),
		}, nil
	}

	// Connect to the broker leader. The callback is currently a no-op.
	if err := s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error {
		// TODO: Implement subscriber data collection from broker statistics
		// This would require access to broker internal statistics about
		// active subscribers, consumer groups, etc.
		return nil
	}); err != nil {
		return &SubscribersData{
			Subscribers:       subscribers,
			TotalSubscribers:  len(subscribers),
			ActiveSubscribers: 0,
			LastUpdated:       time.Now(),
		}, nil
	}

	// Count subscribers reported as active (none today; kept for when the
	// TODO above is implemented).
	active := 0
	for _, sub := range subscribers {
		if sub.Status == "active" {
			active++
		}
	}

	return &SubscribersData{
		Subscribers:       subscribers,
		TotalSubscribers:  len(subscribers),
		ActiveSubscribers: active,
		LastUpdated:       time.Now(),
	}, nil
}
// GetTopicDetails retrieves detailed information about a specific topic:
// partition assignments, schema, active publishers/subscribers, retention
// settings, and consumer group offsets.
//
// Only the topic-configuration lookup is fatal; the publisher, subscriber,
// and offset lookups are best-effort — their failures are logged and leave
// the corresponding section empty instead of failing the whole request.
func (s *AdminServer) GetTopicDetails(namespace, topicName string) (*TopicDetailsData, error) {
	// Find broker leader
	brokerLeader, err := s.findBrokerLeader()
	if err != nil {
		return nil, fmt.Errorf("failed to find broker leader: %v", err)
	}

	var topicDetails *TopicDetailsData

	// Connect to broker leader and get topic configuration
	err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error {
		// One 10s deadline covers the configuration, publisher, and
		// subscriber RPCs below.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		// Get topic configuration using the new API
		configResp, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{
			Topic: &schema_pb.Topic{
				Namespace: namespace,
				Name:      topicName,
			},
		})
		if err != nil {
			return fmt.Errorf("failed to get topic configuration: %v", err)
		}

		// Initialize topic details with empty (non-nil) collections so the
		// template can range over them unconditionally.
		topicDetails = &TopicDetailsData{
			TopicName:            fmt.Sprintf("%s.%s", namespace, topicName),
			Namespace:            namespace,
			Name:                 topicName,
			Partitions:           []PartitionInfo{},
			Schema:               []SchemaFieldInfo{},
			Publishers:           []PublisherInfo{},
			Subscribers:          []TopicSubscriberInfo{},
			ConsumerGroupOffsets: []ConsumerGroupOffsetInfo{},
			Retention:            convertTopicRetention(configResp.Retention),
			CreatedAt:            time.Unix(0, configResp.CreatedAtNs),
			LastUpdated:          time.Unix(0, configResp.LastUpdatedNs),
		}

		// Set current time if timestamps are not available
		if configResp.CreatedAtNs == 0 {
			topicDetails.CreatedAt = time.Now()
		}
		if configResp.LastUpdatedNs == 0 {
			topicDetails.LastUpdated = time.Now()
		}

		// Process partitions; the partition ID shown is the range start, and
		// nil partition assignments are skipped.
		for _, assignment := range configResp.BrokerPartitionAssignments {
			if assignment.Partition != nil {
				partitionInfo := PartitionInfo{
					ID:             assignment.Partition.RangeStart,
					LeaderBroker:   assignment.LeaderBroker,
					FollowerBroker: assignment.FollowerBroker,
					MessageCount:   0,           // Will be enhanced later with actual stats
					TotalSize:      0,           // Will be enhanced later with actual stats
					LastDataTime:   time.Time{}, // Will be enhanced later
					CreatedAt:      time.Now(),
				}
				topicDetails.Partitions = append(topicDetails.Partitions, partitionInfo)
			}
		}

		// Process schema from RecordType
		if configResp.RecordType != nil {
			topicDetails.Schema = convertRecordTypeToSchemaFields(configResp.RecordType)
		}

		// Get publishers information (best-effort)
		publishersResp, err := client.GetTopicPublishers(ctx, &mq_pb.GetTopicPublishersRequest{
			Topic: &schema_pb.Topic{
				Namespace: namespace,
				Name:      topicName,
			},
		})
		if err != nil {
			// Log error but don't fail the entire request
			glog.V(0).Infof("failed to get topic publishers for %s.%s: %v", namespace, topicName, err)
		} else {
			glog.V(1).Infof("got %d publishers for topic %s.%s", len(publishersResp.Publishers), namespace, topicName)
			topicDetails.Publishers = convertTopicPublishers(publishersResp.Publishers)
		}

		// Get subscribers information (best-effort)
		subscribersResp, err := client.GetTopicSubscribers(ctx, &mq_pb.GetTopicSubscribersRequest{
			Topic: &schema_pb.Topic{
				Namespace: namespace,
				Name:      topicName,
			},
		})
		if err != nil {
			// Log error but don't fail the entire request
			glog.V(0).Infof("failed to get topic subscribers for %s.%s: %v", namespace, topicName, err)
		} else {
			glog.V(1).Infof("got %d subscribers for topic %s.%s", len(subscribersResp.Subscribers), namespace, topicName)
			topicDetails.Subscribers = convertTopicSubscribers(subscribersResp.Subscribers)
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Get consumer group offsets from the filer (also best-effort)
	offsets, err := s.GetConsumerGroupOffsets(namespace, topicName)
	if err != nil {
		// Log error but don't fail the entire request
		glog.V(0).Infof("failed to get consumer group offsets for %s.%s: %v", namespace, topicName, err)
	} else {
		glog.V(1).Infof("got %d consumer group offsets for topic %s.%s", len(offsets), namespace, topicName)
		topicDetails.ConsumerGroupOffsets = offsets
	}

	return topicDetails, nil
}
// GetConsumerGroupOffsets retrieves consumer group offsets for a topic from
// the filer.
//
// It walks the on-disk layout
//
//	<topicDir>/v<version>/<start>-<stop>/<consumerGroup>.offset
//
// where each .offset file holds an 8-byte committed offset decoded with
// util.BytesToUint64 (files of any other size are ignored). Listing or read
// failures below the topic directory are logged and skipped so one bad
// partition does not hide the rest; only a failure to list the topic
// directory itself is returned as an error.
func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]ConsumerGroupOffsetInfo, error) {
	var offsets []ConsumerGroupOffsetInfo

	err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
		// Get the topic directory: /topics/namespace/topicName
		topicObj := topic.NewTopic(namespace, topicName)
		topicDir := topicObj.Dir()

		// List all version directories under the topic directory (e.g., v2025-07-10-05-44-34)
		// NOTE(review): each listing below is capped at 1000 entries; larger
		// directories would be silently truncated — confirm the bound.
		versionStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
			Directory:          topicDir,
			Prefix:             "",
			StartFromFileName:  "",
			InclusiveStartFrom: false,
			Limit:              1000,
		})
		if err != nil {
			return fmt.Errorf("failed to list topic directory %s: %v", topicDir, err)
		}

		// Process each version directory
		for {
			versionResp, err := versionStream.Recv()
			if err != nil {
				if err == io.EOF {
					break
				}
				return fmt.Errorf("failed to receive version entries: %v", err)
			}

			// Only process directories that are versions (start with "v")
			if versionResp.Entry.IsDirectory && strings.HasPrefix(versionResp.Entry.Name, "v") {
				// NOTE(review): filepath.Join is OS-dependent; filer paths are
				// slash-separated, so path.Join may be the safer choice — confirm.
				versionDir := filepath.Join(topicDir, versionResp.Entry.Name)

				// List all partition directories under the version directory (e.g., 0315-0630)
				partitionStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
					Directory:          versionDir,
					Prefix:             "",
					StartFromFileName:  "",
					InclusiveStartFrom: false,
					Limit:              1000,
				})
				if err != nil {
					glog.Warningf("Failed to list version directory %s: %v", versionDir, err)
					continue
				}

				// Process each partition directory
				for {
					partitionResp, err := partitionStream.Recv()
					if err != nil {
						if err == io.EOF {
							break
						}
						glog.Warningf("Failed to receive partition entries: %v", err)
						break
					}

					// Only process directories that are partitions (format: NNNN-NNNN)
					if partitionResp.Entry.IsDirectory {
						// Parse partition range to get partition start ID (e.g., "0315-0630" -> 315)
						var partitionStart, partitionStop int32
						if n, err := fmt.Sscanf(partitionResp.Entry.Name, "%04d-%04d", &partitionStart, &partitionStop); n != 2 || err != nil {
							// Skip directories that don't match the partition format
							continue
						}

						partitionDir := filepath.Join(versionDir, partitionResp.Entry.Name)

						// List all .offset files in this partition directory
						offsetStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
							Directory:          partitionDir,
							Prefix:             "",
							StartFromFileName:  "",
							InclusiveStartFrom: false,
							Limit:              1000,
						})
						if err != nil {
							glog.Warningf("Failed to list partition directory %s: %v", partitionDir, err)
							continue
						}

						// Process each offset file
						for {
							offsetResp, err := offsetStream.Recv()
							if err != nil {
								if err == io.EOF {
									break
								}
								glog.Warningf("Failed to receive offset entries: %v", err)
								break
							}

							// Only process .offset files; the consumer group name
							// is the file name without the extension.
							if !offsetResp.Entry.IsDirectory && strings.HasSuffix(offsetResp.Entry.Name, ".offset") {
								consumerGroup := strings.TrimSuffix(offsetResp.Entry.Name, ".offset")

								// Read the offset value from the file
								offsetData, err := filer.ReadInsideFiler(client, partitionDir, offsetResp.Entry.Name)
								if err != nil {
									glog.Warningf("Failed to read offset file %s: %v", offsetResp.Entry.Name, err)
									continue
								}

								// Only an exact 8-byte payload is a valid offset.
								if len(offsetData) == 8 {
									offset := int64(util.BytesToUint64(offsetData))

									// Get the file modification time
									lastUpdated := time.Unix(offsetResp.Entry.Attributes.Mtime, 0)

									offsets = append(offsets, ConsumerGroupOffsetInfo{
										ConsumerGroup: consumerGroup,
										PartitionID:   partitionStart, // Use partition start as the ID
										Offset:        offset,
										LastUpdated:   lastUpdated,
									})
								}
							}
						}
					}
				}
			}
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get consumer group offsets: %v", err)
	}

	return offsets, nil
}
// convertRecordTypeToSchemaFields flattens a protobuf RecordType into the
// SchemaFieldInfo slice rendered on the topic details page. A nil record
// type or nil field list yields a nil slice.
func convertRecordTypeToSchemaFields(recordType *schema_pb.RecordType) []SchemaFieldInfo {
	var fields []SchemaFieldInfo
	if recordType == nil || recordType.Fields == nil {
		return fields
	}
	for _, f := range recordType.Fields {
		fields = append(fields, SchemaFieldInfo{
			Name:     f.Name,
			Type:     getFieldTypeString(f.Type),
			Required: f.IsRequired,
		})
	}
	return fields
}
// getFieldTypeString converts a protobuf Type to a human-readable string:
// scalar names ("int64", "string", ...), "record" for nested records, and
// "list<element>" for list types. Unrecognized or nil types map to "unknown".
func getFieldTypeString(fieldType *schema_pb.Type) string {
	if fieldType == nil {
		return "unknown"
	}
	switch t := fieldType.Kind.(type) {
	case *schema_pb.Type_ScalarType:
		return getScalarTypeString(t.ScalarType)
	case *schema_pb.Type_RecordType:
		return "record"
	case *schema_pb.Type_ListType:
		// Recurse into the element type, e.g. "list<string>".
		return "list<" + getFieldTypeString(t.ListType.ElementType) + ">"
	}
	return "unknown"
}
// getScalarTypeString converts a protobuf ScalarType enum value to its
// lowercase display name; any value outside the known set becomes "unknown".
func getScalarTypeString(scalarType schema_pb.ScalarType) string {
	names := map[schema_pb.ScalarType]string{
		schema_pb.ScalarType_BOOL:   "bool",
		schema_pb.ScalarType_INT32:  "int32",
		schema_pb.ScalarType_INT64:  "int64",
		schema_pb.ScalarType_FLOAT:  "float",
		schema_pb.ScalarType_DOUBLE: "double",
		schema_pb.ScalarType_BYTES:  "bytes",
		schema_pb.ScalarType_STRING: "string",
	}
	if name, ok := names[scalarType]; ok {
		return name
	}
	return "unknown"
}
// convertTopicPublishers converts protobuf TopicPublisher entries into
// UI-facing PublisherInfo values.
//
// Fix: the original dereferenced publisher.Partition.RangeStart without a
// nil check. Generated protobuf message fields are pointers and may be
// unset, which would panic; a nil Partition now falls back to partition 0,
// and nil slice elements are skipped.
func convertTopicPublishers(publishers []*mq_pb.TopicPublisher) []PublisherInfo {
	publisherInfos := make([]PublisherInfo, 0, len(publishers))
	for _, publisher := range publishers {
		if publisher == nil {
			continue
		}
		// Guard against an unset Partition message.
		var partitionID int32
		if publisher.Partition != nil {
			partitionID = publisher.Partition.RangeStart
		}
		publisherInfo := PublisherInfo{
			PublisherName:       publisher.PublisherName,
			ClientID:            publisher.ClientId,
			PartitionID:         partitionID,
			Broker:              publisher.Broker,
			IsActive:            publisher.IsActive,
			LastPublishedOffset: publisher.LastPublishedOffset,
			LastAckedOffset:     publisher.LastAckedOffset,
		}
		// Timestamps are nanoseconds since epoch; zero/negative means "not reported".
		if publisher.ConnectTimeNs > 0 {
			publisherInfo.ConnectTime = time.Unix(0, publisher.ConnectTimeNs)
		}
		if publisher.LastSeenTimeNs > 0 {
			publisherInfo.LastSeenTime = time.Unix(0, publisher.LastSeenTimeNs)
		}
		publisherInfos = append(publisherInfos, publisherInfo)
	}
	return publisherInfos
}
// convertTopicSubscribers converts protobuf TopicSubscriber entries into
// UI-facing TopicSubscriberInfo values.
//
// Fix: the original dereferenced subscriber.Partition.RangeStart without a
// nil check. Generated protobuf message fields are pointers and may be
// unset, which would panic; a nil Partition now falls back to partition 0,
// and nil slice elements are skipped.
func convertTopicSubscribers(subscribers []*mq_pb.TopicSubscriber) []TopicSubscriberInfo {
	subscriberInfos := make([]TopicSubscriberInfo, 0, len(subscribers))
	for _, subscriber := range subscribers {
		if subscriber == nil {
			continue
		}
		// Guard against an unset Partition message.
		var partitionID int32
		if subscriber.Partition != nil {
			partitionID = subscriber.Partition.RangeStart
		}
		subscriberInfo := TopicSubscriberInfo{
			ConsumerGroup:      subscriber.ConsumerGroup,
			ConsumerID:         subscriber.ConsumerId,
			ClientID:           subscriber.ClientId,
			PartitionID:        partitionID,
			Broker:             subscriber.Broker,
			IsActive:           subscriber.IsActive,
			CurrentOffset:      subscriber.CurrentOffset,
			LastReceivedOffset: subscriber.LastReceivedOffset,
		}
		// Timestamps are nanoseconds since epoch; zero/negative means "not reported".
		if subscriber.ConnectTimeNs > 0 {
			subscriberInfo.ConnectTime = time.Unix(0, subscriber.ConnectTimeNs)
		}
		if subscriber.LastSeenTimeNs > 0 {
			subscriberInfo.LastSeenTime = time.Unix(0, subscriber.LastSeenTimeNs)
		}
		subscriberInfos = append(subscriberInfos, subscriberInfo)
	}
	return subscriberInfos
}
// findBrokerLeader locates the current message queue broker leader. It first
// asks the master for all registered brokers, then probes each candidate with
// a FindBrokerLeader request (3s timeout per probe) and returns the first
// broker that answers successfully.
func (s *AdminServer) findBrokerLeader() (string, error) {
	// Collect every broker address known to the master.
	var brokers []string
	listErr := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.BrokerType,
		})
		if err != nil {
			return err
		}
		for _, node := range resp.ClusterNodes {
			brokers = append(brokers, node.Address)
		}
		return nil
	})
	if listErr != nil {
		return "", fmt.Errorf("failed to list brokers: %v", listErr)
	}
	if len(brokers) == 0 {
		return "", fmt.Errorf("no brokers found in cluster")
	}
	// Probe each candidate; the first one that responds without error wins.
	for _, candidate := range brokers {
		probeErr := s.withBrokerClient(candidate, func(client mq_pb.SeaweedMessagingClient) error {
			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancel()
			_, err := client.FindBrokerLeader(ctx, &mq_pb.FindBrokerLeaderRequest{
				FilerGroup: "",
			})
			return err
		})
		if probeErr == nil {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("no broker leader found")
}
// withBrokerClient connects to a message queue broker at brokerAddress and
// executes fn with the resulting gRPC client, using the admin server's dial
// options. The leading boolean is forwarded to pb.WithBrokerGrpcClient
// unchanged (presumably a streaming-mode flag — confirm against pb package).
func (s *AdminServer) withBrokerClient(brokerAddress string, fn func(client mq_pb.SeaweedMessagingClient) error) error {
	return pb.WithBrokerGrpcClient(false, brokerAddress, s.grpcDialOption, fn)
}
// convertTopicRetention maps a protobuf TopicRetention to the UI-facing
// TopicRetentionInfo. For an enabled policy it derives a human-friendly
// value/unit pair (days, hours, or raw seconds) via integer division; a nil
// or disabled retention yields a disabled info with the default "days" unit.
func convertTopicRetention(retention *mq_pb.TopicRetention) TopicRetentionInfo {
	if retention == nil || !retention.Enabled {
		return TopicRetentionInfo{
			Enabled:          false,
			RetentionSeconds: 0,
			DisplayValue:     0,
			DisplayUnit:      "days",
		}
	}
	seconds := retention.RetentionSeconds
	info := TopicRetentionInfo{
		Enabled:          retention.Enabled,
		RetentionSeconds: seconds,
	}
	// Pick the largest unit that fits; truncation is intentional since the
	// exact value is preserved in RetentionSeconds.
	switch {
	case seconds >= 86400: // at least one day
		info.DisplayValue = int32(seconds / 86400)
		info.DisplayUnit = "days"
	case seconds >= 3600: // at least one hour
		info.DisplayValue = int32(seconds / 3600)
		info.DisplayUnit = "hours"
	default:
		info.DisplayValue = int32(seconds)
		info.DisplayUnit = "seconds"
	}
	return info
}

View File

@@ -0,0 +1,296 @@
package dash
import (
"context"
"fmt"
"io"
"path/filepath"
"sort"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/topic"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)
// TopicRetentionPurger handles topic data purging based on retention policies.
// It walks each retention-enabled topic's filer directory and deletes expired
// version directories, always keeping the most recent one.
type TopicRetentionPurger struct {
	adminServer *AdminServer // provides broker and filer client access
}
// NewTopicRetentionPurger returns a purger bound to the given admin server,
// which it uses for broker and filer access.
func NewTopicRetentionPurger(adminServer *AdminServer) *TopicRetentionPurger {
	purger := &TopicRetentionPurger{adminServer: adminServer}
	return purger
}
// PurgeExpiredTopicData purges expired topic data based on retention policies.
// It discovers every topic with retention enabled and purges each in turn;
// per-topic failures are logged and do not abort the run, so the method only
// returns an error when the topic list itself cannot be obtained.
func (p *TopicRetentionPurger) PurgeExpiredTopicData() error {
	glog.V(1).Infof("Starting topic data purge based on retention policies")
	topics, err := p.getTopicsWithRetention()
	if err != nil {
		return fmt.Errorf("failed to get topics with retention: %v", err)
	}
	glog.V(1).Infof("Found %d topics with retention enabled", len(topics))
	for _, cfg := range topics {
		if purgeErr := p.purgeTopicData(cfg); purgeErr != nil {
			// Best-effort: keep going with the remaining topics.
			glog.Errorf("Failed to purge data for topic %s: %v", cfg.TopicName, purgeErr)
		}
	}
	glog.V(1).Infof("Completed topic data purge")
	return nil
}
// TopicRetentionConfig represents a topic with its retention configuration.
type TopicRetentionConfig struct {
	TopicName        string // display form "<namespace>.<name>"
	Namespace        string
	Name             string
	RetentionSeconds int64 // data older than this many seconds is eligible for purge
}
// getTopicsWithRetention retrieves all topics that have retention enabled.
// It locates the broker leader, lists every topic, and keeps only those whose
// configuration reports an enabled retention with a positive duration.
// Topics whose configuration cannot be fetched are logged and skipped.
func (p *TopicRetentionPurger) getTopicsWithRetention() ([]TopicRetentionConfig, error) {
	leader, err := p.adminServer.findBrokerLeader()
	if err != nil {
		return nil, fmt.Errorf("failed to find broker leader: %v", err)
	}
	var result []TopicRetentionConfig
	err = p.adminServer.withBrokerClient(leader, func(client mq_pb.SeaweedMessagingClient) error {
		// One shared deadline covers the listing and all per-topic lookups.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		listResp, listErr := client.ListTopics(ctx, &mq_pb.ListTopicsRequest{})
		if listErr != nil {
			return listErr
		}
		for _, pbTopic := range listResp.Topics {
			confResp, confErr := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{
				Topic: pbTopic,
			})
			if confErr != nil {
				glog.Warningf("Failed to get configuration for topic %s.%s: %v", pbTopic.Namespace, pbTopic.Name, confErr)
				continue
			}
			ret := confResp.Retention
			if ret == nil || !ret.Enabled || ret.RetentionSeconds <= 0 {
				continue
			}
			result = append(result, TopicRetentionConfig{
				TopicName:        fmt.Sprintf("%s.%s", pbTopic.Namespace, pbTopic.Name),
				Namespace:        pbTopic.Namespace,
				Name:             pbTopic.Name,
				RetentionSeconds: ret.RetentionSeconds,
			})
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result, nil
}
// purgeTopicData purges expired data for a specific topic.
//
// It lists the version directories under the topic's filer directory (names
// like "v2025-01-10-05-44-34"), sorts them oldest-first, and deletes every
// directory whose name-encoded timestamp is older than now-RetentionSeconds.
// The most recent version directory is always kept, even if expired, so the
// topic never loses its latest data. Per-directory deletion failures are
// logged and do not abort the purge.
func (p *TopicRetentionPurger) purgeTopicData(topicRetention TopicRetentionConfig) error {
	glog.V(1).Infof("Purging expired data for topic %s with retention %d seconds", topicRetention.TopicName, topicRetention.RetentionSeconds)
	// Calculate cutoff time: directories created before this are expired.
	cutoffTime := time.Now().Add(-time.Duration(topicRetention.RetentionSeconds) * time.Second)
	// Get topic directory on the filer.
	topicObj := topic.NewTopic(topicRetention.Namespace, topicRetention.Name)
	topicDir := topicObj.Dir()
	var purgedDirs []string
	err := p.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
		// List all version directories under the topic directory.
		// NOTE(review): Limit is 1000 with no pagination; a topic with more
		// than 1000 entries would be only partially listed — confirm bound.
		versionStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
			Directory:          topicDir,
			Prefix:             "",
			StartFromFileName:  "",
			InclusiveStartFrom: false,
			Limit:              1000,
		})
		if err != nil {
			return fmt.Errorf("failed to list topic directory %s: %v", topicDir, err)
		}
		var versionDirs []VersionDirInfo
		// Collect all version directories from the stream.
		for {
			versionResp, err := versionStream.Recv()
			if err != nil {
				if err == io.EOF {
					break
				}
				return fmt.Errorf("failed to receive version entries: %v", err)
			}
			// Only process directories that are versions (start with "v").
			if versionResp.Entry.IsDirectory && strings.HasPrefix(versionResp.Entry.Name, "v") {
				versionTime, err := p.parseVersionTime(versionResp.Entry.Name)
				if err != nil {
					// Unparseable names are skipped (and thus never purged).
					glog.Warningf("Failed to parse version time from %s: %v", versionResp.Entry.Name, err)
					continue
				}
				versionDirs = append(versionDirs, VersionDirInfo{
					Name:        versionResp.Entry.Name,
					VersionTime: versionTime,
					ModTime:     time.Unix(versionResp.Entry.Attributes.Mtime, 0),
				})
			}
		}
		// Sort version directories by time (oldest first).
		sort.Slice(versionDirs, func(i, j int) bool {
			return versionDirs[i].VersionTime.Before(versionDirs[j].VersionTime)
		})
		// Keep at least the most recent version directory, even if it's expired.
		if len(versionDirs) <= 1 {
			glog.V(1).Infof("Topic %s has %d version directories, keeping all", topicRetention.TopicName, len(versionDirs))
			return nil
		}
		// Purge expired directories (the last, most recent one is excluded).
		for i := 0; i < len(versionDirs)-1; i++ {
			versionDir := versionDirs[i]
			// Check if this version directory is expired.
			if versionDir.VersionTime.Before(cutoffTime) {
				dirPath := filepath.Join(topicDir, versionDir.Name)
				// Delete the entire version directory.
				err := p.deleteDirectoryRecursively(client, dirPath)
				if err != nil {
					// Log and continue so one failure doesn't stop the purge.
					glog.Errorf("Failed to delete expired directory %s: %v", dirPath, err)
				} else {
					purgedDirs = append(purgedDirs, dirPath)
					glog.V(1).Infof("Purged expired directory: %s (created: %s)", dirPath, versionDir.VersionTime.Format("2006-01-02 15:04:05"))
				}
			}
		}
		return nil
	})
	if err != nil {
		return err
	}
	if len(purgedDirs) > 0 {
		glog.V(0).Infof("Purged %d expired directories for topic %s", len(purgedDirs), topicRetention.TopicName)
	}
	return nil
}
// VersionDirInfo represents a version directory with its timestamp parsed
// from the directory name (see parseVersionTime).
type VersionDirInfo struct {
	Name        string    // directory name, e.g. "v2025-01-10-05-44-34"
	VersionTime time.Time // timestamp decoded from Name
	ModTime     time.Time // filer modification time of the directory entry
}
// parseVersionTime extracts the timestamp embedded in a version directory
// name of the form "v2025-01-10-05-44-34" (a 'v' prefix followed by the
// layout "2006-01-02-15-04-05"). It returns an error for names that lack
// the prefix or do not parse with that layout.
func (p *TopicRetentionPurger) parseVersionTime(versionName string) (time.Time, error) {
	if !strings.HasPrefix(versionName, "v") {
		return time.Time{}, fmt.Errorf("invalid version format: %s", versionName)
	}
	// Strip the 'v' prefix, leaving the raw timestamp text.
	timeStr := strings.TrimPrefix(versionName, "v")
	parsed, err := time.Parse("2006-01-02-15-04-05", timeStr)
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to parse version time %s: %v", timeStr, err)
	}
	return parsed, nil
}
// deleteDirectoryRecursively deletes a directory and all its contents via the
// filer client: files are deleted one by one, subdirectories are recursed
// into depth-first, and finally the directory entry itself is removed from
// its parent. The first failure aborts and is returned.
//
// NOTE(review): the listing uses Limit 1000 with no pagination, so a
// directory holding more than 1000 entries could be only partially emptied
// before the final DeleteEntry — confirm this bound is acceptable.
func (p *TopicRetentionPurger) deleteDirectoryRecursively(client filer_pb.SeaweedFilerClient, dirPath string) error {
	// List all entries in the directory.
	stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
		Directory:          dirPath,
		Prefix:             "",
		StartFromFileName:  "",
		InclusiveStartFrom: false,
		Limit:              1000,
	})
	if err != nil {
		return fmt.Errorf("failed to list directory %s: %v", dirPath, err)
	}
	// Delete all entries as they stream in.
	for {
		resp, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			return fmt.Errorf("failed to receive entries: %v", err)
		}
		entryPath := filepath.Join(dirPath, resp.Entry.Name)
		if resp.Entry.IsDirectory {
			// Recursively delete subdirectory.
			err = p.deleteDirectoryRecursively(client, entryPath)
			if err != nil {
				return fmt.Errorf("failed to delete subdirectory %s: %v", entryPath, err)
			}
		} else {
			// Delete file.
			_, err = client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{
				Directory: dirPath,
				Name:      resp.Entry.Name,
			})
			if err != nil {
				return fmt.Errorf("failed to delete file %s: %v", entryPath, err)
			}
		}
	}
	// Delete the (now empty) directory itself from its parent.
	parentDir := filepath.Dir(dirPath)
	dirName := filepath.Base(dirPath)
	_, err = client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{
		Directory: parentDir,
		Name:      dirName,
	})
	if err != nil {
		return fmt.Errorf("failed to delete directory %s: %v", dirPath, err)
	}
	return nil
}

View File

@@ -48,13 +48,17 @@ type VolumeServer struct {
// S3 Bucket management structures

// S3Bucket summarizes one S3 bucket for the admin UI, including usage
// statistics, quota, versioning, and object-lock settings.
//
// Fix: the extracted diff left both the old and the new field lists fused
// into this struct (Name, CreatedAt, etc. declared twice), which is invalid
// Go; only the current (superset) field list is kept.
type S3Bucket struct {
	Name               string    `json:"name"`
	CreatedAt          time.Time `json:"created_at"`
	Size               int64     `json:"size"`         // total bytes stored in the bucket
	ObjectCount        int64     `json:"object_count"` // number of objects in the bucket
	LastModified       time.Time `json:"last_modified"`
	Quota              int64     `json:"quota"`                // Quota in bytes, 0 means no quota
	QuotaEnabled       bool      `json:"quota_enabled"`        // Whether quota is enabled
	VersioningEnabled  bool      `json:"versioning_enabled"`   // Whether versioning is enabled
	ObjectLockEnabled  bool      `json:"object_lock_enabled"`  // Whether object lock is enabled
	ObjectLockMode     string    `json:"object_lock_mode"`     // Object lock mode: "GOVERNANCE" or "COMPLIANCE"
	ObjectLockDuration int32     `json:"object_lock_duration"` // Default retention duration in days
}
type S3Object struct {
@@ -189,6 +193,132 @@ type ClusterFilersData struct {
LastUpdated time.Time `json:"last_updated"`
}
// MessageBrokerInfo describes a single message queue broker in the cluster.
type MessageBrokerInfo struct {
	Address    string    `json:"address"`    // broker network address
	DataCenter string    `json:"datacenter"` // data center the broker belongs to
	Rack       string    `json:"rack"`       // rack within the data center
	Version    string    `json:"version"`    // broker software version
	CreatedAt  time.Time `json:"created_at"`
}
// ClusterBrokersData aggregates broker information for the admin UI brokers view.
type ClusterBrokersData struct {
	Username     string              `json:"username"` // logged-in admin user
	Brokers      []MessageBrokerInfo `json:"brokers"`
	TotalBrokers int                 `json:"total_brokers"`
	LastUpdated  time.Time           `json:"last_updated"`
}
// TopicInfo summarizes a single message queue topic for listing in the admin UI.
type TopicInfo struct {
	Name         string             `json:"name"`
	Partitions   int                `json:"partitions"`  // number of partitions
	Subscribers  int                `json:"subscribers"` // number of subscribers
	MessageCount int64              `json:"message_count"`
	TotalSize    int64              `json:"total_size"` // total stored bytes
	LastMessage  time.Time          `json:"last_message"`
	CreatedAt    time.Time          `json:"created_at"`
	Retention    TopicRetentionInfo `json:"retention"` // retention policy summary
}
// TopicsData aggregates topic information for the admin UI topics view.
type TopicsData struct {
	Username      string      `json:"username"` // logged-in admin user
	Topics        []TopicInfo `json:"topics"`
	TotalTopics   int         `json:"total_topics"`
	TotalMessages int64       `json:"total_messages"` // sum across all topics
	TotalSize     int64       `json:"total_size"`     // sum of stored bytes across all topics
	LastUpdated   time.Time   `json:"last_updated"`
}
// SubscriberInfo describes one subscriber for the admin UI subscribers view.
type SubscriberInfo struct {
	Name          string    `json:"name"`
	Topic         string    `json:"topic"`          // topic the subscriber consumes
	ConsumerGroup string    `json:"consumer_group"` // consumer group membership
	Status        string    `json:"status"`
	LastSeen      time.Time `json:"last_seen"`
	MessageCount  int64     `json:"message_count"`
	CreatedAt     time.Time `json:"created_at"`
}
// SubscribersData aggregates subscriber information for the admin UI.
type SubscribersData struct {
	Username          string           `json:"username"` // logged-in admin user
	Subscribers       []SubscriberInfo `json:"subscribers"`
	TotalSubscribers  int              `json:"total_subscribers"`
	ActiveSubscribers int              `json:"active_subscribers"` // subset of TotalSubscribers currently active
	LastUpdated       time.Time        `json:"last_updated"`
}
// Topic Details structures

// PartitionInfo describes one partition of a topic, including which brokers
// host it and its data statistics.
type PartitionInfo struct {
	ID             int32     `json:"id"`
	LeaderBroker   string    `json:"leader_broker"`   // broker serving the partition
	FollowerBroker string    `json:"follower_broker"` // replica broker, if any
	MessageCount   int64     `json:"message_count"`
	TotalSize      int64     `json:"total_size"` // stored bytes in this partition
	LastDataTime   time.Time `json:"last_data_time"`
	CreatedAt      time.Time `json:"created_at"`
}
// SchemaFieldInfo is a flattened, display-friendly view of one topic schema
// field (see convertRecordTypeToSchemaFields).
type SchemaFieldInfo struct {
	Name     string `json:"name"`
	Type     string `json:"type"` // human-readable type, e.g. "int64", "record", "list<string>"
	Required bool   `json:"required"`
}
// PublisherInfo describes one active or historical publisher connection to a
// topic partition (see convertTopicPublishers).
type PublisherInfo struct {
	PublisherName       string    `json:"publisher_name"`
	ClientID            string    `json:"client_id"`
	PartitionID         int32     `json:"partition_id"` // partition range start used as the identifier
	Broker              string    `json:"broker"`       // broker the publisher is connected to
	ConnectTime         time.Time `json:"connect_time"`
	LastSeenTime        time.Time `json:"last_seen_time"`
	IsActive            bool      `json:"is_active"`
	LastPublishedOffset int64     `json:"last_published_offset"`
	LastAckedOffset     int64     `json:"last_acked_offset"`
}
// TopicSubscriberInfo describes one subscriber connection on a specific
// topic partition (see convertTopicSubscribers).
type TopicSubscriberInfo struct {
	ConsumerGroup      string    `json:"consumer_group"`
	ConsumerID         string    `json:"consumer_id"`
	ClientID           string    `json:"client_id"`
	PartitionID        int32     `json:"partition_id"` // partition range start used as the identifier
	Broker             string    `json:"broker"`       // broker the subscriber is connected to
	ConnectTime        time.Time `json:"connect_time"`
	LastSeenTime       time.Time `json:"last_seen_time"`
	IsActive           bool      `json:"is_active"`
	CurrentOffset      int64     `json:"current_offset"`       // last acknowledged offset
	LastReceivedOffset int64     `json:"last_received_offset"` // last received offset
}
// ConsumerGroupOffsetInfo is a consumer group's committed offset for one
// partition, read from the group's ".offset" file on the filer.
type ConsumerGroupOffsetInfo struct {
	ConsumerGroup string    `json:"consumer_group"`
	PartitionID   int32     `json:"partition_id"` // partition range start used as the identifier
	Offset        int64     `json:"offset"`       // committed offset value
	LastUpdated   time.Time `json:"last_updated"` // modification time of the offset file
}
// TopicRetentionInfo is the UI-facing view of a topic's retention policy
// (see convertTopicRetention). RetentionSeconds holds the exact value;
// DisplayValue/DisplayUnit are a truncated human-readable form.
type TopicRetentionInfo struct {
	Enabled          bool   `json:"enabled"`
	RetentionSeconds int64  `json:"retention_seconds"`
	DisplayValue     int32  `json:"display_value"` // for UI rendering
	DisplayUnit      string `json:"display_unit"`  // for UI rendering: "days", "hours", or "seconds"
}
// TopicDetailsData is the full view model for a single topic's detail page:
// partitions, schema, connected publishers/subscribers, committed consumer
// group offsets, and the retention policy.
type TopicDetailsData struct {
	Username             string                    `json:"username"`   // logged-in admin user
	TopicName            string                    `json:"topic_name"` // display form "<namespace>.<name>"
	Namespace            string                    `json:"namespace"`
	Name                 string                    `json:"name"`
	Partitions           []PartitionInfo           `json:"partitions"`
	Schema               []SchemaFieldInfo         `json:"schema"`
	Publishers           []PublisherInfo           `json:"publishers"`
	Subscribers          []TopicSubscriberInfo     `json:"subscribers"`
	ConsumerGroupOffsets []ConsumerGroupOffsetInfo `json:"consumer_group_offsets"`
	Retention            TopicRetentionInfo        `json:"retention"`
	MessageCount         int64                     `json:"message_count"`
	TotalSize            int64                     `json:"total_size"`
	CreatedAt            time.Time                 `json:"created_at"`
	LastUpdated          time.Time                 `json:"last_updated"`
}
// Volume server management structures
type ClusterVolumeServersData struct {
Username string `json:"username"`