Admin: misc improvements on admin server and workers. EC now works. (#7055)
* initial design
* added simulation as tests
* reorganized the codebase to move the simulation framework and tests into their own dedicated package
* integration test. ec worker task
* remove "enhanced" reference
* start master, volume servers, filer
  Current Status
  ✅ Master: Healthy and running (port 9333)
  ✅ Filer: Healthy and running (port 8888)
  ✅ Volume Servers: All 6 servers running (ports 8080-8085)
  🔄 Admin/Workers: Will start when dependencies are ready
* generate write load
* tasks are assigned
* admin starts with grpc port. worker has its own working directory
* Update .gitignore
* working worker and admin. Task detection is not working yet.
* compiles, detection uses volumeSizeLimitMB from master
* compiles
* worker retries connecting to admin
* build and restart
* rendering pending tasks
* skip task ID column
* sticky worker id
* test canScheduleTaskNow
* worker reconnects to admin
* clean up logs
* worker registers itself first
* worker can run ec work and report status, but: 1. one volume should not be repeatedly worked on; 2. ec shards need to be distributed and source data should be deleted.
* move ec task logic
* listing ec shards
* local copy, ec. Need to distribute.
* ec is mostly working now
* distribution of ec shards needs improvement
* need configuration to enable ec
* show ec volumes
* interval field UI component
* rename
* integration test with vacuuming
* garbage percentage threshold
* fix warning
* display ec shard sizes
* fix ec volumes list
* Update ui.go
* show default values
* ensure correct default value
* MaintenanceConfig uses ConfigField
* use schema-defined defaults
* config
* reduce duplication
* refactor to use BaseUIProvider
* each task registers its schema
* checkECEncodingCandidate uses ecDetector
* use vacuumDetector
* use volumeSizeLimitMB
* remove remove
* remove unused
* refactor
* use new framework
* remove v2 reference
* refactor
* left menu can scroll now
* The maintenance manager was not being initialized when no data directory was configured for persistent storage.
* saving config
* Update task_config_schema_templ.go
* enable/disable tasks
* protobuf-encoded task configurations
* fix system settings
* use ui component
* remove logs
* interface{} reduction
* reduce interface{}
* reduce interface{}
* avoid from/to map
* reduce interface{}
* refactor
* keep it DRY
* added logging
* debug messages
* debug level
* debug
* show the log caller line
* use configured task policy
* log level
* handle admin heartbeat response
* Update worker.go
* fix EC rack and dc count
* Report task status to admin server
* fix task logging, simplify interface checking, use erasure_coding constants
* factor in empty volume server during task planning
* volume.list adds disk id
* track disk id also
* fix locking for scheduled and manual scanning
* add active topology
* simplify task detector
* ec task completed, but shards are not showing up
* implement ec in ec_typed.go
* adjust log level
* dedup
* implementing ec copying shards and only ecx files
* use disk id when distributing ec shards (see the sketch after this list)
  🎯 Planning: ActiveTopology creates a DestinationPlan with a specific TargetDisk
  📦 Task Creation: maintenance_integration.go creates an ECDestination with DiskId
  🚀 Task Execution: the EC task passes DiskId in VolumeEcShardsCopyRequest
  💾 Volume Server: receives disk_id and stores shards on the specific disk (vs.store.Locations[req.DiskId])
  📂 File System: EC shards and metadata land in the exact disk directory planned
* Delete original volume from all locations
* clean up existing shard locations
* local encoding and distributing
* Update docker/admin_integration/EC-TESTING-README.md
  Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
* check volume id range
* simplify
* fix tests
* fix types
* clean up logs and tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
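The disk-pinning flow in the "use disk id when distributing ec shards" item above boils down to one extra field on the shard-copy RPC. Below is a minimal sketch, not code from this PR: copyShardsToDisk is a hypothetical helper, the DiskId field and the Locations[req.DiskId] lookup are taken from the commit notes above, and the rest uses the generated volume_server_pb client.

// Illustrative sketch only: shows how a planned DiskId can be threaded through
// the shard-copy RPC so the receiving volume server stores the shards on one
// specific disk (the server indexes vs.store.Locations[req.DiskId]).
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

// copyShardsToDisk is a hypothetical helper, not part of this PR.
func copyShardsToDisk(ctx context.Context, targetAddr, sourceAddr, collection string,
	volumeID uint32, shardIDs []uint32, diskID uint32) error {

	conn, err := grpc.Dial(targetAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return fmt.Errorf("dial %s: %w", targetAddr, err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)
	_, err = client.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       volumeID,
		Collection:     collection,
		ShardIds:       shardIDs,
		CopyEcxFile:    true,       // copy the .ecx index alongside the data shards
		SourceDataNode: sourceAddr, // where the locally encoded shards currently live
		DiskId:         diskID,     // field added in this PR: pins shards to Locations[diskID]
	})
	return err
}

The point of the extra field is that the placement decision made by ActiveTopology survives all the way to the volume server, instead of letting the server pick whichever disk it prefers.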
@@ -1,6 +1,7 @@
package handlers

import (
	"math"
	"net/http"
	"strconv"

@@ -161,6 +162,129 @@ func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) {
	}
}

// ShowCollectionDetails renders the collection detail page
func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) {
	collectionName := c.Param("name")
	if collectionName == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Collection name is required"})
		return
	}

	// Parse query parameters
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "25"))
	sortBy := c.DefaultQuery("sort_by", "volume_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")

	// Get collection details data (volumes and EC volumes)
	collectionDetailsData, err := h.adminServer.GetCollectionDetails(collectionName, page, pageSize, sortBy, sortOrder)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get collection details: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	collectionDetailsData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	collectionDetailsComponent := app.CollectionDetails(*collectionDetailsData)
	layoutComponent := layout.Layout(c, collectionDetailsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowClusterEcShards handles the cluster EC shards page (individual shards view)
func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
	// Parse query parameters
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "100"))
	sortBy := c.DefaultQuery("sort_by", "volume_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")
	collection := c.DefaultQuery("collection", "")

	// Get data from admin server
	data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	data.Username = username

	// Render template
	c.Header("Content-Type", "text/html")
	ecVolumesComponent := app.ClusterEcVolumes(*data)
	layoutComponent := layout.Layout(c, ecVolumesComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
}

// ShowEcVolumeDetails renders the EC volume details page
func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) {
	volumeIDStr := c.Param("id")

	if volumeIDStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"})
		return
	}

	volumeID, err := strconv.Atoi(volumeIDStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"})
		return
	}

	// Check that volumeID is within uint32 range
	if volumeID < 0 || volumeID > int(math.MaxUint32) {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID out of range"})
		return
	}

	// Parse sorting parameters
	sortBy := c.DefaultQuery("sort_by", "shard_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")

	// Get EC volume details
	ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	ecVolumeDetails.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	ecVolumeDetailsComponent := app.EcVolumeDetails(*ecVolumeDetails)
	layoutComponent := layout.Layout(c, ecVolumeDetailsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowClusterMasters renders the cluster masters page
func (h *ClusterHandlers) ShowClusterMasters(c *gin.Context) {
	// Get cluster masters data