* admin: add plugin runtime UI page and route wiring * pb: add plugin gRPC contract and generated bindings * admin/plugin: implement worker registry, runtime, monitoring, and config store * admin/dash: wire plugin runtime and expose plugin workflow APIs * command: add flags to enable plugin runtime * admin: rename remaining plugin v2 wording to plugin * admin/plugin: add detectable job type registry helper * admin/plugin: add scheduled detection and dispatch orchestration * admin/plugin: prefetch job type descriptors when workers connect * admin/plugin: add known job type discovery API and UI * admin/plugin: refresh design doc to match current implementation * admin/plugin: enforce per-worker scheduler concurrency limits * admin/plugin: use descriptor runtime defaults for scheduler policy * admin/ui: auto-load first known plugin job type on page open * admin/plugin: bootstrap persisted config from descriptor defaults * admin/plugin: dedupe scheduled proposals by dedupe key * admin/ui: add job type and state filters for plugin monitoring * admin/ui: add per-job-type plugin activity summary * admin/plugin: split descriptor read API from schema refresh * admin/ui: keep plugin summary metrics global while tables are filtered * admin/plugin: retry executor reservation before timing out * admin/plugin: expose scheduler states for monitoring * admin/ui: show per-job-type scheduler states in plugin monitor * pb/plugin: rename protobuf package to plugin * admin/plugin: rename pluginRuntime wiring to plugin * admin/plugin: remove runtime naming from plugin APIs and UI * admin/plugin: rename runtime files to plugin naming * admin/plugin: persist jobs and activities for monitor recovery * admin/plugin: lease one detector worker per job type * admin/ui: show worker load from plugin heartbeats * admin/plugin: skip stale workers for detector and executor picks * plugin/worker: add plugin worker command and stream runtime scaffold * plugin/worker: implement vacuum detect and 
execute handlers * admin/plugin: document external vacuum plugin worker starter * command: update plugin.worker help to reflect implemented flow * command/admin: drop legacy Plugin V2 label * plugin/worker: validate vacuum job type and respect min interval * plugin/worker: test no-op detect when min interval not elapsed * command/admin: document plugin.worker external process * plugin/worker: advertise configured concurrency in hello * command/plugin.worker: add jobType handler selection * command/plugin.worker: test handler selection by job type * command/plugin.worker: persist worker id in workingDir * admin/plugin: document plugin.worker jobType and workingDir flags * plugin/worker: support cancel request for in-flight work * plugin/worker: test cancel request acknowledgements * command/plugin.worker: document workingDir and jobType behavior * plugin/worker: emit executor activity events for monitor * plugin/worker: test executor activity builder * admin/plugin: send last successful run in detection request * admin/plugin: send cancel request when detect or execute context ends * admin/plugin: document worker cancel request responsibility * admin/handlers: expose plugin scheduler states API in no-auth mode * admin/handlers: test plugin scheduler states route registration * admin/plugin: keep worker id on worker-generated activity records * admin/plugin: test worker id propagation in monitor activities * admin/dash: always initialize plugin service * command/admin: remove plugin enable flags and default to enabled * admin/dash: drop pluginEnabled constructor parameter * admin/plugin UI: stop checking plugin enabled state * admin/plugin: remove docs for plugin enable flags * admin/dash: remove unused plugin enabled check method * admin/dash: fallback to in-memory plugin init when dataDir fails * admin/plugin API: expose worker gRPC port in status * command/plugin.worker: resolve admin gRPC port via plugin status * split plugin UI into 
overview/configuration/monitoring pages * Update layout_templ.go * add volume_balance plugin worker handler * wire plugin.worker CLI for volume_balance job type * add erasure_coding plugin worker handler * wire plugin.worker CLI for erasure_coding job type * support multi-job handlers in plugin worker runtime * allow plugin.worker jobType as comma-separated list * admin/plugin UI: rename to Workers and simplify config view * plugin worker: queue detection requests instead of capacity reject * Update plugin_worker.go * plugin volume_balance: remove force_move/timeout from worker config UI * plugin erasure_coding: enforce local working dir and cleanup * admin/plugin UI: rename admin settings to job scheduling * admin/plugin UI: persist and robustly render detection results * admin/plugin: record and return detection trace metadata * admin/plugin UI: show detection process and decision trace * plugin: surface detector decision trace as activities * mini: start a plugin worker by default * admin/plugin UI: split monitoring into detection and execution tabs * plugin worker: emit detection decision trace for EC and balance * admin workers UI: split monitoring into detection and execution pages * plugin scheduler: skip proposals for active assigned/running jobs * admin workers UI: add job queue tab * plugin worker: add dummy stress detector and executor job type * admin workers UI: reorder tabs to detection queue execution * admin workers UI: regenerate plugin template * plugin defaults: include dummy stress and add stress tests * plugin dummy stress: rotate detection selections across runs * plugin scheduler: remove cross-run proposal dedupe * plugin queue: track pending scheduled jobs * plugin scheduler: wait for executor capacity before dispatch * plugin scheduler: skip detection when waiting backlog is high * plugin: add disk-backed job detail API and persistence * admin ui: show plugin job detail modal from job id links * plugin: generate unique job ids instead of 
reusing proposal ids * plugin worker: emit heartbeats on work state changes * plugin registry: round-robin tied executor and detector picks * add temporary EC overnight stress runner * plugin job details: persist and render EC execution plans * ec volume details: color data and parity shard badges * shard labels: keep parity ids numeric and color-only distinction * admin: remove legacy maintenance UI routes and templates * admin: remove dead maintenance endpoint helpers * Update layout_templ.go * remove dummy_stress worker and command support * refactor plugin UI to job-type top tabs and sub-tabs * migrate weed worker command to plugin runtime * remove plugin.worker command and keep worker runtime with metrics * update helm worker args for jobType and execution flags * set plugin scheduling defaults to global 16 and per-worker 4 * stress: fix RPC context reuse and remove redundant variables in ec_stress_runner * admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants * admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API * admin/handlers: implement buffered rendering to prevent response corruption * admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups * admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve * admin/plugin: implement atomic file writes and fix run record side effects * admin/plugin: use P prefix for parity shard labels in execution plans * admin/plugin: enable parallel execution for cancellation tests * admin: refactor time.Time fields to pointers for better JSON omitempty support * admin/plugin: implement pointer-safe time assignments and comparisons in plugin core * admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor * admin/plugin: update scheduler activity tracking to use time pointers * admin/plugin: fix time-based run history trimming after pointer refactor * 
admin/dash: fix JobSpec struct literal in plugin API after pointer refactor * admin/view: add D/P prefixes to EC shard badges for UI consistency * admin/plugin: use lifecycle-aware context for schema prefetching * Update ec_volume_details_templ.go * admin/stress: fix proposal sorting and log volume cleanup errors * stress: refine ec stress runner with math/rand and collection name - Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction. - Replaced crypto/rand with seeded math/rand PRNG for bulk payloads. - Added documentation for EcMinAge zero-value behavior. - Added logging for ignored errors in volume/shard deletion. * admin: return internal server error for plugin store failures Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors. * admin: implement safe channel sends and graceful shutdown sync - Added sync.WaitGroup to Plugin struct to manage background goroutines. - Implemented safeSendCh helper using recover() to prevent panics on closed channels. - Ensured Shutdown() waits for all background operations to complete. * admin: robustify plugin monitor with nil-safe time and record init - Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt). - Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk. - Fixed debounced persistence to trigger immediate write on job completion. * admin: improve scheduler shutdown behavior and logic guards - Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection. - Removed redundant nil guard in buildScheduledJobSpec. - Standardized WaitGroup usage for schedulerLoop. * admin: implement deep copy for job parameters and atomic write fixes - Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state. 
- Ensured atomicWriteFile creates parent directories before writing. * admin: remove unreachable branch in shard classification Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded. * admin: secure UI links and use canonical shard constants - Added rel="noopener noreferrer" to external links for security. - Replaced magic number 14 with erasure_coding.TotalShardsCount. - Used renderEcShardBadge for missing shard list consistency. * admin: stabilize plugin tests and fix regressions - Composed a robust plugin_monitor_test.go to handle asynchronous persistence. - Updated all time.Time literals to use timeToPtr helper. - Added explicit Shutdown() calls in tests to synchronize with debounced writes. - Fixed syntax errors and orphaned struct literals in tests. * Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> * Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> * admin: finalize refinements for error handling, scheduler, and race fixes - Standardized HTTP 500 status codes for store failures in plugin_api.go. - Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown. - Fixed race condition in safeSendDetectionComplete by extracting channel under lock. - Implemented deep copy for JobActivity details. - Used defaultDirPerm constant in atomicWriteFile. 
* test(ec): migrate admin dockertest to plugin APIs * admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors * admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures * admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage * admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID * admin/plugin: fix racy Shutdown channel close with sync.Once * admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg * admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only * admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators * test/ec: check http.NewRequest errors to prevent nil req panics * test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1 * plugin(ec): raise default detection and scheduling throughput limits * topology: include empty disks in volume list and EC capacity fallback * topology: remove hard 10-task cap for detection planning * Update ec_volume_details_templ.go * adjust default * fix tests --------- Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
680 lines
30 KiB
Go
680 lines
30 KiB
Go
package handlers
|
|
|
|
import (
|
|
"net/http"
|
|
"net/url"
|
|
"time"
|
|
|
|
"github.com/gin-gonic/gin"
|
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
"github.com/seaweedfs/seaweedfs/weed/admin/dash"
|
|
"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
|
|
"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
|
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
|
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3tables"
|
|
"github.com/seaweedfs/seaweedfs/weed/stats"
|
|
)
|
|
|
|
// AdminHandlers contains all the HTTP handlers for the admin interface.
// It aggregates one handler group per admin domain; SetupRoutes wires each
// group's methods onto the gin router.
type AdminHandlers struct {
	adminServer            *dash.AdminServer       // underlying admin server; also serves JSON APIs directly (e.g. ShowAdmin, bucket/s3tables endpoints)
	authHandlers           *AuthHandlers           // login/logout page and credential handling
	clusterHandlers        *ClusterHandlers        // cluster topology, masters, filers, volume servers, volumes, EC shards
	fileBrowserHandlers    *FileBrowserHandlers    // file browser UI and file management APIs
	userHandlers           *UserHandlers           // object-store user and access-key management
	policyHandlers         *PolicyHandlers         // object-store policy CRUD and validation
	pluginHandlers         *PluginHandlers         // plugin UI pages (configuration, queue, detection, execution, monitoring)
	mqHandlers             *MessageQueueHandlers   // message queue brokers/topics pages and APIs
	serviceAccountHandlers *ServiceAccountHandlers // service account management
}
|
|
|
|
// NewAdminHandlers creates a new instance of AdminHandlers
|
|
func NewAdminHandlers(adminServer *dash.AdminServer) *AdminHandlers {
|
|
authHandlers := NewAuthHandlers(adminServer)
|
|
clusterHandlers := NewClusterHandlers(adminServer)
|
|
fileBrowserHandlers := NewFileBrowserHandlers(adminServer)
|
|
userHandlers := NewUserHandlers(adminServer)
|
|
policyHandlers := NewPolicyHandlers(adminServer)
|
|
pluginHandlers := NewPluginHandlers(adminServer)
|
|
mqHandlers := NewMessageQueueHandlers(adminServer)
|
|
serviceAccountHandlers := NewServiceAccountHandlers(adminServer)
|
|
return &AdminHandlers{
|
|
adminServer: adminServer,
|
|
authHandlers: authHandlers,
|
|
clusterHandlers: clusterHandlers,
|
|
fileBrowserHandlers: fileBrowserHandlers,
|
|
userHandlers: userHandlers,
|
|
policyHandlers: policyHandlers,
|
|
pluginHandlers: pluginHandlers,
|
|
mqHandlers: mqHandlers,
|
|
serviceAccountHandlers: serviceAccountHandlers,
|
|
}
|
|
}
|
|
|
|
// SetupRoutes configures all the routes for the admin interface
|
|
func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, adminUser, adminPassword, readOnlyUser, readOnlyPassword string, enableUI bool) {
|
|
// Health check (no auth required)
|
|
r.GET("/health", h.HealthCheck)
|
|
|
|
// Prometheus metrics endpoint (no auth required)
|
|
r.GET("/metrics", gin.WrapH(promhttp.HandlerFor(stats.Gather, promhttp.HandlerOpts{})))
|
|
|
|
// Favicon route (no auth required) - redirect to static version
|
|
r.GET("/favicon.ico", func(c *gin.Context) {
|
|
c.Redirect(http.StatusMovedPermanently, "/static/favicon.ico")
|
|
})
|
|
|
|
// Skip UI routes if UI is not enabled
|
|
if !enableUI {
|
|
return
|
|
}
|
|
|
|
if authRequired {
|
|
// Authentication routes (no auth required)
|
|
r.GET("/login", h.authHandlers.ShowLogin)
|
|
r.POST("/login", h.authHandlers.HandleLogin(adminUser, adminPassword, readOnlyUser, readOnlyPassword))
|
|
r.GET("/logout", h.authHandlers.HandleLogout)
|
|
|
|
// Protected routes group
|
|
protected := r.Group("/")
|
|
protected.Use(dash.RequireAuth())
|
|
|
|
// Main admin interface routes
|
|
protected.GET("/", h.ShowDashboard)
|
|
protected.GET("/admin", h.ShowDashboard)
|
|
|
|
// Object Store management routes
|
|
protected.GET("/object-store/buckets", h.ShowS3Buckets)
|
|
protected.GET("/object-store/buckets/:bucket", h.ShowBucketDetails)
|
|
protected.GET("/object-store/users", h.userHandlers.ShowObjectStoreUsers)
|
|
protected.GET("/object-store/policies", h.policyHandlers.ShowPolicies)
|
|
protected.GET("/object-store/service-accounts", h.serviceAccountHandlers.ShowServiceAccounts)
|
|
protected.GET("/object-store/s3tables/buckets", h.ShowS3TablesBuckets)
|
|
protected.GET("/object-store/s3tables/buckets/:bucket/namespaces", h.ShowS3TablesNamespaces)
|
|
protected.GET("/object-store/s3tables/buckets/:bucket/namespaces/:namespace/tables", h.ShowS3TablesTables)
|
|
protected.GET("/object-store/s3tables/buckets/:bucket/namespaces/:namespace/tables/:table", h.ShowS3TablesTableDetails)
|
|
protected.GET("/object-store/iceberg", h.ShowIcebergCatalog)
|
|
protected.GET("/object-store/iceberg/:catalog/namespaces", h.ShowIcebergNamespaces)
|
|
protected.GET("/object-store/iceberg/:catalog/namespaces/:namespace/tables", h.ShowIcebergTables)
|
|
protected.GET("/object-store/iceberg/:catalog/namespaces/:namespace/tables/:table", h.ShowIcebergTableDetails)
|
|
|
|
// File browser routes
|
|
protected.GET("/files", h.fileBrowserHandlers.ShowFileBrowser)
|
|
|
|
// Cluster management routes
|
|
protected.GET("/cluster/masters", h.clusterHandlers.ShowClusterMasters)
|
|
protected.GET("/cluster/filers", h.clusterHandlers.ShowClusterFilers)
|
|
protected.GET("/cluster/volume-servers", h.clusterHandlers.ShowClusterVolumeServers)
|
|
|
|
// Storage management routes
|
|
protected.GET("/storage/volumes", h.clusterHandlers.ShowClusterVolumes)
|
|
protected.GET("/storage/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails)
|
|
protected.GET("/storage/collections", h.clusterHandlers.ShowClusterCollections)
|
|
protected.GET("/storage/collections/:name", h.clusterHandlers.ShowCollectionDetails)
|
|
protected.GET("/storage/ec-shards", h.clusterHandlers.ShowClusterEcShards)
|
|
protected.GET("/storage/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails)
|
|
|
|
// Message Queue management routes
|
|
protected.GET("/mq/brokers", h.mqHandlers.ShowBrokers)
|
|
protected.GET("/mq/topics", h.mqHandlers.ShowTopics)
|
|
protected.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails)
|
|
|
|
protected.GET("/plugin", h.pluginHandlers.ShowPlugin)
|
|
protected.GET("/plugin/configuration", h.pluginHandlers.ShowPluginConfiguration)
|
|
protected.GET("/plugin/queue", h.pluginHandlers.ShowPluginQueue)
|
|
protected.GET("/plugin/detection", h.pluginHandlers.ShowPluginDetection)
|
|
protected.GET("/plugin/execution", h.pluginHandlers.ShowPluginExecution)
|
|
protected.GET("/plugin/monitoring", h.pluginHandlers.ShowPluginMonitoring)
|
|
|
|
// API routes for AJAX calls
|
|
api := r.Group("/api")
|
|
api.Use(dash.RequireAuthAPI()) // Use API-specific auth middleware
|
|
{
|
|
api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology)
|
|
api.GET("/cluster/masters", h.clusterHandlers.GetMasters)
|
|
api.GET("/cluster/volumes", h.clusterHandlers.GetVolumeServers)
|
|
api.GET("/admin", h.adminServer.ShowAdmin) // JSON API for admin data
|
|
api.GET("/config", h.adminServer.GetConfigInfo) // Configuration information
|
|
|
|
// S3 API routes
|
|
s3Api := api.Group("/s3")
|
|
{
|
|
s3Api.GET("/buckets", h.adminServer.ListBucketsAPI)
|
|
s3Api.POST("/buckets", dash.RequireWriteAccess(), h.adminServer.CreateBucket)
|
|
s3Api.DELETE("/buckets/:bucket", dash.RequireWriteAccess(), h.adminServer.DeleteBucket)
|
|
s3Api.GET("/buckets/:bucket", h.adminServer.ShowBucketDetails)
|
|
s3Api.PUT("/buckets/:bucket/quota", dash.RequireWriteAccess(), h.adminServer.UpdateBucketQuota)
|
|
s3Api.PUT("/buckets/:bucket/owner", dash.RequireWriteAccess(), h.adminServer.UpdateBucketOwner)
|
|
}
|
|
|
|
// User management API routes
|
|
usersApi := api.Group("/users")
|
|
{
|
|
usersApi.GET("", h.userHandlers.GetUsers)
|
|
usersApi.POST("", dash.RequireWriteAccess(), h.userHandlers.CreateUser)
|
|
usersApi.GET("/:username", h.userHandlers.GetUserDetails)
|
|
usersApi.PUT("/:username", dash.RequireWriteAccess(), h.userHandlers.UpdateUser)
|
|
usersApi.DELETE("/:username", dash.RequireWriteAccess(), h.userHandlers.DeleteUser)
|
|
usersApi.POST("/:username/access-keys", dash.RequireWriteAccess(), h.userHandlers.CreateAccessKey)
|
|
usersApi.DELETE("/:username/access-keys/:accessKeyId", dash.RequireWriteAccess(), h.userHandlers.DeleteAccessKey)
|
|
usersApi.PUT("/:username/access-keys/:accessKeyId/status", dash.RequireWriteAccess(), h.userHandlers.UpdateAccessKeyStatus)
|
|
usersApi.GET("/:username/policies", h.userHandlers.GetUserPolicies)
|
|
usersApi.PUT("/:username/policies", dash.RequireWriteAccess(), h.userHandlers.UpdateUserPolicies)
|
|
}
|
|
|
|
// Service Account management API routes
|
|
saApi := api.Group("/service-accounts")
|
|
{
|
|
saApi.GET("", h.serviceAccountHandlers.GetServiceAccounts)
|
|
saApi.POST("", dash.RequireWriteAccess(), h.serviceAccountHandlers.CreateServiceAccount)
|
|
saApi.GET("/:id", h.serviceAccountHandlers.GetServiceAccountDetails)
|
|
saApi.PUT("/:id", dash.RequireWriteAccess(), h.serviceAccountHandlers.UpdateServiceAccount)
|
|
saApi.DELETE("/:id", dash.RequireWriteAccess(), h.serviceAccountHandlers.DeleteServiceAccount)
|
|
}
|
|
|
|
// Object Store Policy management API routes
|
|
objectStorePoliciesApi := api.Group("/object-store/policies")
|
|
{
|
|
objectStorePoliciesApi.GET("", h.policyHandlers.GetPolicies)
|
|
objectStorePoliciesApi.POST("", dash.RequireWriteAccess(), h.policyHandlers.CreatePolicy)
|
|
objectStorePoliciesApi.GET("/:name", h.policyHandlers.GetPolicy)
|
|
objectStorePoliciesApi.PUT("/:name", dash.RequireWriteAccess(), h.policyHandlers.UpdatePolicy)
|
|
objectStorePoliciesApi.DELETE("/:name", dash.RequireWriteAccess(), h.policyHandlers.DeletePolicy)
|
|
objectStorePoliciesApi.POST("/validate", h.policyHandlers.ValidatePolicy)
|
|
}
|
|
|
|
// S3 Tables API routes
|
|
s3TablesApi := api.Group("/s3tables")
|
|
{
|
|
s3TablesApi.GET("/buckets", h.adminServer.ListS3TablesBucketsAPI)
|
|
s3TablesApi.POST("/buckets", dash.RequireWriteAccess(), h.adminServer.CreateS3TablesBucket)
|
|
s3TablesApi.DELETE("/buckets", dash.RequireWriteAccess(), h.adminServer.DeleteS3TablesBucket)
|
|
s3TablesApi.GET("/namespaces", h.adminServer.ListS3TablesNamespacesAPI)
|
|
s3TablesApi.POST("/namespaces", dash.RequireWriteAccess(), h.adminServer.CreateS3TablesNamespace)
|
|
s3TablesApi.DELETE("/namespaces", dash.RequireWriteAccess(), h.adminServer.DeleteS3TablesNamespace)
|
|
s3TablesApi.GET("/tables", h.adminServer.ListS3TablesTablesAPI)
|
|
s3TablesApi.POST("/tables", dash.RequireWriteAccess(), h.adminServer.CreateS3TablesTable)
|
|
s3TablesApi.DELETE("/tables", dash.RequireWriteAccess(), h.adminServer.DeleteS3TablesTable)
|
|
s3TablesApi.PUT("/bucket-policy", dash.RequireWriteAccess(), h.adminServer.PutS3TablesBucketPolicy)
|
|
s3TablesApi.GET("/bucket-policy", h.adminServer.GetS3TablesBucketPolicy)
|
|
s3TablesApi.DELETE("/bucket-policy", dash.RequireWriteAccess(), h.adminServer.DeleteS3TablesBucketPolicy)
|
|
s3TablesApi.PUT("/table-policy", dash.RequireWriteAccess(), h.adminServer.PutS3TablesTablePolicy)
|
|
s3TablesApi.GET("/table-policy", h.adminServer.GetS3TablesTablePolicy)
|
|
s3TablesApi.DELETE("/table-policy", dash.RequireWriteAccess(), h.adminServer.DeleteS3TablesTablePolicy)
|
|
s3TablesApi.PUT("/tags", dash.RequireWriteAccess(), h.adminServer.TagS3TablesResource)
|
|
s3TablesApi.GET("/tags", h.adminServer.ListS3TablesTags)
|
|
s3TablesApi.DELETE("/tags", dash.RequireWriteAccess(), h.adminServer.UntagS3TablesResource)
|
|
}
|
|
|
|
// File management API routes
|
|
filesApi := api.Group("/files")
|
|
{
|
|
filesApi.DELETE("/delete", dash.RequireWriteAccess(), h.fileBrowserHandlers.DeleteFile)
|
|
filesApi.DELETE("/delete-multiple", dash.RequireWriteAccess(), h.fileBrowserHandlers.DeleteMultipleFiles)
|
|
filesApi.POST("/create-folder", dash.RequireWriteAccess(), h.fileBrowserHandlers.CreateFolder)
|
|
filesApi.POST("/upload", dash.RequireWriteAccess(), h.fileBrowserHandlers.UploadFile)
|
|
filesApi.GET("/download", h.fileBrowserHandlers.DownloadFile)
|
|
filesApi.GET("/view", h.fileBrowserHandlers.ViewFile)
|
|
filesApi.GET("/properties", h.fileBrowserHandlers.GetFileProperties)
|
|
}
|
|
|
|
// Volume management API routes
|
|
volumeApi := api.Group("/volumes")
|
|
{
|
|
volumeApi.POST("/:id/:server/vacuum", dash.RequireWriteAccess(), h.clusterHandlers.VacuumVolume)
|
|
}
|
|
|
|
// Plugin API routes
|
|
pluginApi := api.Group("/plugin")
|
|
{
|
|
pluginApi.GET("/status", h.adminServer.GetPluginStatusAPI)
|
|
pluginApi.GET("/workers", h.adminServer.GetPluginWorkersAPI)
|
|
pluginApi.GET("/job-types", h.adminServer.GetPluginJobTypesAPI)
|
|
pluginApi.GET("/jobs", h.adminServer.GetPluginJobsAPI)
|
|
pluginApi.GET("/jobs/:jobId", h.adminServer.GetPluginJobAPI)
|
|
pluginApi.GET("/jobs/:jobId/detail", h.adminServer.GetPluginJobDetailAPI)
|
|
pluginApi.GET("/activities", h.adminServer.GetPluginActivitiesAPI)
|
|
pluginApi.GET("/scheduler-states", h.adminServer.GetPluginSchedulerStatesAPI)
|
|
pluginApi.GET("/job-types/:jobType/descriptor", h.adminServer.GetPluginJobTypeDescriptorAPI)
|
|
pluginApi.POST("/job-types/:jobType/schema", h.adminServer.RequestPluginJobTypeSchemaAPI)
|
|
pluginApi.GET("/job-types/:jobType/config", h.adminServer.GetPluginJobTypeConfigAPI)
|
|
pluginApi.PUT("/job-types/:jobType/config", dash.RequireWriteAccess(), h.adminServer.UpdatePluginJobTypeConfigAPI)
|
|
pluginApi.GET("/job-types/:jobType/runs", h.adminServer.GetPluginRunHistoryAPI)
|
|
pluginApi.POST("/job-types/:jobType/detect", dash.RequireWriteAccess(), h.adminServer.TriggerPluginDetectionAPI)
|
|
pluginApi.POST("/job-types/:jobType/run", dash.RequireWriteAccess(), h.adminServer.RunPluginJobTypeAPI)
|
|
pluginApi.POST("/jobs/execute", dash.RequireWriteAccess(), h.adminServer.ExecutePluginJobAPI)
|
|
}
|
|
|
|
// Message Queue API routes
|
|
mqApi := api.Group("/mq")
|
|
{
|
|
mqApi.GET("/topics/:namespace/:topic", h.mqHandlers.GetTopicDetailsAPI)
|
|
mqApi.POST("/topics/create", dash.RequireWriteAccess(), h.mqHandlers.CreateTopicAPI)
|
|
mqApi.POST("/topics/retention/update", dash.RequireWriteAccess(), h.mqHandlers.UpdateTopicRetentionAPI)
|
|
mqApi.POST("/retention/purge", dash.RequireWriteAccess(), h.adminServer.TriggerTopicRetentionPurgeAPI)
|
|
}
|
|
}
|
|
} else {
|
|
// No authentication required - all routes are public
|
|
r.GET("/", h.ShowDashboard)
|
|
r.GET("/admin", h.ShowDashboard)
|
|
|
|
// Object Store management routes
|
|
r.GET("/object-store/buckets", h.ShowS3Buckets)
|
|
r.GET("/object-store/buckets/:bucket", h.ShowBucketDetails)
|
|
r.GET("/object-store/users", h.userHandlers.ShowObjectStoreUsers)
|
|
r.GET("/object-store/policies", h.policyHandlers.ShowPolicies)
|
|
r.GET("/object-store/service-accounts", h.serviceAccountHandlers.ShowServiceAccounts)
|
|
r.GET("/object-store/s3tables/buckets", h.ShowS3TablesBuckets)
|
|
r.GET("/object-store/s3tables/buckets/:bucket/namespaces", h.ShowS3TablesNamespaces)
|
|
r.GET("/object-store/s3tables/buckets/:bucket/namespaces/:namespace/tables", h.ShowS3TablesTables)
|
|
r.GET("/object-store/s3tables/buckets/:bucket/namespaces/:namespace/tables/:table", h.ShowS3TablesTableDetails)
|
|
r.GET("/object-store/iceberg", h.ShowIcebergCatalog)
|
|
r.GET("/object-store/iceberg/:catalog/namespaces", h.ShowIcebergNamespaces)
|
|
r.GET("/object-store/iceberg/:catalog/namespaces/:namespace/tables", h.ShowIcebergTables)
|
|
r.GET("/object-store/iceberg/:catalog/namespaces/:namespace/tables/:table", h.ShowIcebergTableDetails)
|
|
|
|
// File browser routes
|
|
r.GET("/files", h.fileBrowserHandlers.ShowFileBrowser)
|
|
|
|
// Cluster management routes
|
|
r.GET("/cluster/masters", h.clusterHandlers.ShowClusterMasters)
|
|
r.GET("/cluster/filers", h.clusterHandlers.ShowClusterFilers)
|
|
r.GET("/cluster/volume-servers", h.clusterHandlers.ShowClusterVolumeServers)
|
|
|
|
// Storage management routes
|
|
r.GET("/storage/volumes", h.clusterHandlers.ShowClusterVolumes)
|
|
r.GET("/storage/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails)
|
|
r.GET("/storage/collections", h.clusterHandlers.ShowClusterCollections)
|
|
r.GET("/storage/collections/:name", h.clusterHandlers.ShowCollectionDetails)
|
|
r.GET("/storage/ec-shards", h.clusterHandlers.ShowClusterEcShards)
|
|
r.GET("/storage/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails)
|
|
|
|
// Message Queue management routes
|
|
r.GET("/mq/brokers", h.mqHandlers.ShowBrokers)
|
|
r.GET("/mq/topics", h.mqHandlers.ShowTopics)
|
|
r.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails)
|
|
|
|
r.GET("/plugin", h.pluginHandlers.ShowPlugin)
|
|
r.GET("/plugin/configuration", h.pluginHandlers.ShowPluginConfiguration)
|
|
r.GET("/plugin/queue", h.pluginHandlers.ShowPluginQueue)
|
|
r.GET("/plugin/detection", h.pluginHandlers.ShowPluginDetection)
|
|
r.GET("/plugin/execution", h.pluginHandlers.ShowPluginExecution)
|
|
r.GET("/plugin/monitoring", h.pluginHandlers.ShowPluginMonitoring)
|
|
|
|
// API routes for AJAX calls
|
|
api := r.Group("/api")
|
|
{
|
|
api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology)
|
|
api.GET("/cluster/masters", h.clusterHandlers.GetMasters)
|
|
api.GET("/cluster/volumes", h.clusterHandlers.GetVolumeServers)
|
|
api.GET("/admin", h.adminServer.ShowAdmin) // JSON API for admin data
|
|
api.GET("/config", h.adminServer.GetConfigInfo) // Configuration information
|
|
|
|
// S3 API routes
|
|
s3Api := api.Group("/s3")
|
|
{
|
|
s3Api.GET("/buckets", h.adminServer.ListBucketsAPI)
|
|
s3Api.POST("/buckets", h.adminServer.CreateBucket)
|
|
s3Api.DELETE("/buckets/:bucket", h.adminServer.DeleteBucket)
|
|
s3Api.GET("/buckets/:bucket", h.adminServer.ShowBucketDetails)
|
|
s3Api.PUT("/buckets/:bucket/quota", h.adminServer.UpdateBucketQuota)
|
|
s3Api.PUT("/buckets/:bucket/owner", h.adminServer.UpdateBucketOwner)
|
|
}
|
|
|
|
// User management API routes
|
|
usersApi := api.Group("/users")
|
|
{
|
|
usersApi.GET("", h.userHandlers.GetUsers)
|
|
usersApi.POST("", h.userHandlers.CreateUser)
|
|
usersApi.GET("/:username", h.userHandlers.GetUserDetails)
|
|
usersApi.PUT("/:username", h.userHandlers.UpdateUser)
|
|
usersApi.DELETE("/:username", h.userHandlers.DeleteUser)
|
|
usersApi.POST("/:username/access-keys", h.userHandlers.CreateAccessKey)
|
|
usersApi.DELETE("/:username/access-keys/:accessKeyId", h.userHandlers.DeleteAccessKey)
|
|
usersApi.PUT("/:username/access-keys/:accessKeyId/status", h.userHandlers.UpdateAccessKeyStatus)
|
|
usersApi.GET("/:username/policies", h.userHandlers.GetUserPolicies)
|
|
usersApi.PUT("/:username/policies", h.userHandlers.UpdateUserPolicies)
|
|
}
|
|
|
|
// Service Account management API routes
|
|
saApi := api.Group("/service-accounts")
|
|
{
|
|
saApi.GET("", h.serviceAccountHandlers.GetServiceAccounts)
|
|
saApi.POST("", h.serviceAccountHandlers.CreateServiceAccount)
|
|
saApi.GET("/:id", h.serviceAccountHandlers.GetServiceAccountDetails)
|
|
saApi.PUT("/:id", h.serviceAccountHandlers.UpdateServiceAccount)
|
|
saApi.DELETE("/:id", h.serviceAccountHandlers.DeleteServiceAccount)
|
|
}
|
|
|
|
// Object Store Policy management API routes
|
|
objectStorePoliciesApi := api.Group("/object-store/policies")
|
|
{
|
|
objectStorePoliciesApi.GET("", h.policyHandlers.GetPolicies)
|
|
objectStorePoliciesApi.POST("", h.policyHandlers.CreatePolicy)
|
|
objectStorePoliciesApi.GET("/:name", h.policyHandlers.GetPolicy)
|
|
objectStorePoliciesApi.PUT("/:name", h.policyHandlers.UpdatePolicy)
|
|
objectStorePoliciesApi.DELETE("/:name", h.policyHandlers.DeletePolicy)
|
|
objectStorePoliciesApi.POST("/validate", h.policyHandlers.ValidatePolicy)
|
|
}
|
|
|
|
// S3 Tables API routes
|
|
s3TablesApi := api.Group("/s3tables")
|
|
{
|
|
s3TablesApi.GET("/buckets", h.adminServer.ListS3TablesBucketsAPI)
|
|
s3TablesApi.POST("/buckets", h.adminServer.CreateS3TablesBucket)
|
|
s3TablesApi.DELETE("/buckets", h.adminServer.DeleteS3TablesBucket)
|
|
s3TablesApi.GET("/namespaces", h.adminServer.ListS3TablesNamespacesAPI)
|
|
s3TablesApi.POST("/namespaces", h.adminServer.CreateS3TablesNamespace)
|
|
s3TablesApi.DELETE("/namespaces", h.adminServer.DeleteS3TablesNamespace)
|
|
s3TablesApi.GET("/tables", h.adminServer.ListS3TablesTablesAPI)
|
|
s3TablesApi.POST("/tables", h.adminServer.CreateS3TablesTable)
|
|
s3TablesApi.DELETE("/tables", h.adminServer.DeleteS3TablesTable)
|
|
s3TablesApi.PUT("/bucket-policy", h.adminServer.PutS3TablesBucketPolicy)
|
|
s3TablesApi.GET("/bucket-policy", h.adminServer.GetS3TablesBucketPolicy)
|
|
s3TablesApi.DELETE("/bucket-policy", h.adminServer.DeleteS3TablesBucketPolicy)
|
|
s3TablesApi.PUT("/table-policy", h.adminServer.PutS3TablesTablePolicy)
|
|
s3TablesApi.GET("/table-policy", h.adminServer.GetS3TablesTablePolicy)
|
|
s3TablesApi.DELETE("/table-policy", h.adminServer.DeleteS3TablesTablePolicy)
|
|
s3TablesApi.PUT("/tags", h.adminServer.TagS3TablesResource)
|
|
s3TablesApi.GET("/tags", h.adminServer.ListS3TablesTags)
|
|
s3TablesApi.DELETE("/tags", h.adminServer.UntagS3TablesResource)
|
|
}
|
|
|
|
// File management API routes
|
|
filesApi := api.Group("/files")
|
|
{
|
|
filesApi.DELETE("/delete", h.fileBrowserHandlers.DeleteFile)
|
|
filesApi.DELETE("/delete-multiple", h.fileBrowserHandlers.DeleteMultipleFiles)
|
|
filesApi.POST("/create-folder", h.fileBrowserHandlers.CreateFolder)
|
|
filesApi.POST("/upload", h.fileBrowserHandlers.UploadFile)
|
|
filesApi.GET("/download", h.fileBrowserHandlers.DownloadFile)
|
|
filesApi.GET("/view", h.fileBrowserHandlers.ViewFile)
|
|
filesApi.GET("/properties", h.fileBrowserHandlers.GetFileProperties)
|
|
}
|
|
|
|
// Volume management API routes
|
|
volumeApi := api.Group("/volumes")
|
|
{
|
|
volumeApi.POST("/:id/:server/vacuum", h.clusterHandlers.VacuumVolume)
|
|
}
|
|
|
|
// Plugin API routes
|
|
pluginApi := api.Group("/plugin")
|
|
{
|
|
pluginApi.GET("/status", h.adminServer.GetPluginStatusAPI)
|
|
pluginApi.GET("/workers", h.adminServer.GetPluginWorkersAPI)
|
|
pluginApi.GET("/job-types", h.adminServer.GetPluginJobTypesAPI)
|
|
pluginApi.GET("/jobs", h.adminServer.GetPluginJobsAPI)
|
|
pluginApi.GET("/jobs/:jobId", h.adminServer.GetPluginJobAPI)
|
|
pluginApi.GET("/jobs/:jobId/detail", h.adminServer.GetPluginJobDetailAPI)
|
|
pluginApi.GET("/activities", h.adminServer.GetPluginActivitiesAPI)
|
|
pluginApi.GET("/scheduler-states", h.adminServer.GetPluginSchedulerStatesAPI)
|
|
pluginApi.GET("/job-types/:jobType/descriptor", h.adminServer.GetPluginJobTypeDescriptorAPI)
|
|
pluginApi.POST("/job-types/:jobType/schema", h.adminServer.RequestPluginJobTypeSchemaAPI)
|
|
pluginApi.GET("/job-types/:jobType/config", h.adminServer.GetPluginJobTypeConfigAPI)
|
|
pluginApi.PUT("/job-types/:jobType/config", h.adminServer.UpdatePluginJobTypeConfigAPI)
|
|
pluginApi.GET("/job-types/:jobType/runs", h.adminServer.GetPluginRunHistoryAPI)
|
|
pluginApi.POST("/job-types/:jobType/detect", h.adminServer.TriggerPluginDetectionAPI)
|
|
pluginApi.POST("/job-types/:jobType/run", h.adminServer.RunPluginJobTypeAPI)
|
|
pluginApi.POST("/jobs/execute", h.adminServer.ExecutePluginJobAPI)
|
|
}
|
|
|
|
// Message Queue API routes
|
|
mqApi := api.Group("/mq")
|
|
{
|
|
mqApi.GET("/topics/:namespace/:topic", h.mqHandlers.GetTopicDetailsAPI)
|
|
mqApi.POST("/topics/create", h.mqHandlers.CreateTopicAPI)
|
|
mqApi.POST("/topics/retention/update", h.mqHandlers.UpdateTopicRetentionAPI)
|
|
mqApi.POST("/retention/purge", h.adminServer.TriggerTopicRetentionPurgeAPI)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// HealthCheck returns the health status of the admin interface
|
|
func (h *AdminHandlers) HealthCheck(c *gin.Context) {
|
|
c.JSON(200, gin.H{"health": "ok"})
|
|
}
|
|
|
|
// ShowDashboard renders the main admin dashboard
|
|
func (h *AdminHandlers) ShowDashboard(c *gin.Context) {
|
|
// Get admin data from the server
|
|
adminData := h.getAdminData(c)
|
|
|
|
// Render HTML template
|
|
c.Header("Content-Type", "text/html")
|
|
adminComponent := app.Admin(adminData)
|
|
layoutComponent := layout.Layout(c, adminComponent)
|
|
err := layoutComponent.Render(c.Request.Context(), c.Writer)
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
|
|
return
|
|
}
|
|
}
|
|
|
|
// ShowS3Buckets renders the Object Store buckets management page
|
|
func (h *AdminHandlers) ShowS3Buckets(c *gin.Context) {
|
|
// Get Object Store buckets data from the server
|
|
s3Data := h.getS3BucketsData(c)
|
|
|
|
// Render HTML template
|
|
c.Header("Content-Type", "text/html")
|
|
s3Component := app.S3Buckets(s3Data)
|
|
layoutComponent := layout.Layout(c, s3Component)
|
|
err := layoutComponent.Render(c.Request.Context(), c.Writer)
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
|
|
return
|
|
}
|
|
}
|
|
|
|
// ShowS3TablesBuckets renders the S3 Tables buckets page
|
|
func (h *AdminHandlers) ShowS3TablesBuckets(c *gin.Context) {
|
|
username := h.getUsername(c)
|
|
|
|
data, err := h.adminServer.GetS3TablesBucketsData(c.Request.Context())
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get S3 Tables buckets: " + err.Error()})
|
|
return
|
|
}
|
|
data.Username = username
|
|
|
|
c.Header("Content-Type", "text/html")
|
|
component := app.S3TablesBuckets(data)
|
|
layoutComponent := layout.Layout(c, component)
|
|
if err := layoutComponent.Render(c.Request.Context(), c.Writer); err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
|
|
}
|
|
}
|
|
|
|
// ShowS3TablesNamespaces renders namespaces for a table bucket
|
|
func (h *AdminHandlers) ShowS3TablesNamespaces(c *gin.Context) {
|
|
username := h.getUsername(c)
|
|
|
|
bucketName := c.Param("bucket")
|
|
arn, err := buildS3TablesBucketArn(bucketName)
|
|
if err != nil {
|
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
return
|
|
}
|
|
|
|
data, err := h.adminServer.GetS3TablesNamespacesData(c.Request.Context(), arn)
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get S3 Tables namespaces: " + err.Error()})
|
|
return
|
|
}
|
|
data.Username = username
|
|
|
|
c.Header("Content-Type", "text/html")
|
|
component := app.S3TablesNamespaces(data)
|
|
layoutComponent := layout.Layout(c, component)
|
|
if err := layoutComponent.Render(c.Request.Context(), c.Writer); err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
|
|
}
|
|
}
|
|
|
|
// ShowS3TablesTables renders tables for a namespace
|
|
func (h *AdminHandlers) ShowS3TablesTables(c *gin.Context) {
|
|
username := h.getUsername(c)
|
|
|
|
bucketName := c.Param("bucket")
|
|
namespace := c.Param("namespace")
|
|
arn, err := buildS3TablesBucketArn(bucketName)
|
|
if err != nil {
|
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
return
|
|
}
|
|
|
|
data, err := h.adminServer.GetS3TablesTablesData(c.Request.Context(), arn, namespace)
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get S3 Tables tables: " + err.Error()})
|
|
return
|
|
}
|
|
data.Username = username
|
|
|
|
c.Header("Content-Type", "text/html")
|
|
component := app.S3TablesTables(data)
|
|
layoutComponent := layout.Layout(c, component)
|
|
if err := layoutComponent.Render(c.Request.Context(), c.Writer); err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
|
|
}
|
|
}
|
|
|
|
// ShowS3TablesTableDetails renders Iceberg table metadata and snapshot details on the merged S3 Tables path.
|
|
func (h *AdminHandlers) ShowS3TablesTableDetails(c *gin.Context) {
|
|
bucketName := c.Param("bucket")
|
|
namespace := c.Param("namespace")
|
|
tableName := c.Param("table")
|
|
arn, err := buildS3TablesBucketArn(bucketName)
|
|
if err != nil {
|
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
return
|
|
}
|
|
|
|
data, err := h.adminServer.GetIcebergTableDetailsData(c.Request.Context(), bucketName, arn, namespace, tableName)
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get table details: " + err.Error()})
|
|
return
|
|
}
|
|
data.Username = h.getUsername(c)
|
|
|
|
c.Header("Content-Type", "text/html")
|
|
component := app.IcebergTableDetails(data)
|
|
layoutComponent := layout.Layout(c, component)
|
|
if err := layoutComponent.Render(c.Request.Context(), c.Writer); err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
|
|
}
|
|
}
|
|
|
|
func buildS3TablesBucketArn(bucketName string) (string, error) {
|
|
return s3tables.BuildBucketARN(s3tables.DefaultRegion, s3_constants.AccountAdminId, bucketName)
|
|
}
|
|
|
|
// getUsername returns the username from context, defaulting to "admin" if not set
|
|
func (h *AdminHandlers) getUsername(c *gin.Context) string {
|
|
username := c.GetString("username")
|
|
if username == "" {
|
|
username = "admin"
|
|
}
|
|
return username
|
|
}
|
|
|
|
// ShowIcebergCatalog redirects legacy Iceberg catalog URL to the merged S3 Tables buckets page.
|
|
func (h *AdminHandlers) ShowIcebergCatalog(c *gin.Context) {
|
|
c.Redirect(http.StatusMovedPermanently, "/object-store/s3tables/buckets")
|
|
}
|
|
|
|
// ShowIcebergNamespaces redirects legacy Iceberg namespaces URL to the merged S3 Tables namespaces page.
|
|
func (h *AdminHandlers) ShowIcebergNamespaces(c *gin.Context) {
|
|
catalogName := c.Param("catalog")
|
|
c.Redirect(http.StatusMovedPermanently, "/object-store/s3tables/buckets/"+url.PathEscape(catalogName)+"/namespaces")
|
|
}
|
|
|
|
// ShowIcebergTables redirects legacy Iceberg tables URL to the merged S3 Tables tables page.
|
|
func (h *AdminHandlers) ShowIcebergTables(c *gin.Context) {
|
|
catalogName := c.Param("catalog")
|
|
namespace := c.Param("namespace")
|
|
c.Redirect(http.StatusMovedPermanently, "/object-store/s3tables/buckets/"+url.PathEscape(catalogName)+"/namespaces/"+url.PathEscape(namespace)+"/tables")
|
|
}
|
|
|
|
// ShowIcebergTableDetails redirects legacy Iceberg table details URL to the merged S3 Tables details page.
|
|
func (h *AdminHandlers) ShowIcebergTableDetails(c *gin.Context) {
|
|
catalogName := c.Param("catalog")
|
|
namespace := c.Param("namespace")
|
|
tableName := c.Param("table")
|
|
c.Redirect(http.StatusMovedPermanently, "/object-store/s3tables/buckets/"+url.PathEscape(catalogName)+"/namespaces/"+url.PathEscape(namespace)+"/tables/"+url.PathEscape(tableName))
|
|
}
|
|
|
|
// ShowBucketDetails returns detailed information about a specific bucket
|
|
func (h *AdminHandlers) ShowBucketDetails(c *gin.Context) {
|
|
bucketName := c.Param("bucket")
|
|
details, err := h.adminServer.GetBucketDetails(bucketName)
|
|
if err != nil {
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get bucket details: " + err.Error()})
|
|
return
|
|
}
|
|
c.JSON(http.StatusOK, details)
|
|
}
|
|
|
|
// getS3BucketsData retrieves Object Store buckets data from the server
|
|
func (h *AdminHandlers) getS3BucketsData(c *gin.Context) dash.S3BucketsData {
|
|
username := c.GetString("username")
|
|
if username == "" {
|
|
username = "admin"
|
|
}
|
|
|
|
// Get Object Store buckets data
|
|
data, err := h.adminServer.GetS3BucketsData()
|
|
if err != nil {
|
|
// Return empty data on error
|
|
return dash.S3BucketsData{
|
|
Username: username,
|
|
Buckets: []dash.S3Bucket{},
|
|
TotalBuckets: 0,
|
|
TotalSize: 0,
|
|
LastUpdated: time.Now(),
|
|
}
|
|
}
|
|
|
|
data.Username = username
|
|
return data
|
|
}
|
|
|
|
// getAdminData retrieves admin data from the server (now uses consolidated method)
|
|
func (h *AdminHandlers) getAdminData(c *gin.Context) dash.AdminData {
|
|
username := c.GetString("username")
|
|
|
|
// Use the consolidated GetAdminData method from AdminServer
|
|
adminData, err := h.adminServer.GetAdminData(username)
|
|
if err != nil {
|
|
// Return default data when services are not available
|
|
if username == "" {
|
|
username = "admin"
|
|
}
|
|
|
|
masterNodes := []dash.MasterNode{
|
|
{
|
|
Address: "localhost:9333",
|
|
IsLeader: true,
|
|
},
|
|
}
|
|
|
|
return dash.AdminData{
|
|
Username: username,
|
|
TotalVolumes: 0,
|
|
TotalFiles: 0,
|
|
TotalSize: 0,
|
|
MasterNodes: masterNodes,
|
|
VolumeServers: []dash.VolumeServer{},
|
|
FilerNodes: []dash.FilerNode{},
|
|
DataCenters: []dash.DataCenter{},
|
|
LastUpdated: time.Now(),
|
|
}
|
|
}
|
|
|
|
return adminData
|
|
}
|
|
|
|
// Helper functions
|