Admin UI: Fetch task logs (#7114)

* show task details
* loading tasks
* task UI works
* generic rendering
* rendering the export link
* removing placementConflicts from task parameters
* remove TaskSourceLocation
* remove "Server ID" column
* rendering balance task source
* sources and targets
* fix ec task generation
* move info
* render timeline
* simplified worker id
* simplify
* read task logs from worker
* isValidTaskID
* address comments
* Update weed/worker/tasks/balance/execution.go (Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>)
* Update weed/worker/tasks/erasure_coding/ec_task.go (Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>)
* Update weed/worker/tasks/task_log_handler.go (Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>)
* fix shard ids
* plan distributing shard id
* rendering planned shards in task details
* remove Conflicts
* worker logs correctly
* pass in dc and rack
* task logging
* Update weed/admin/maintenance/maintenance_queue.go (Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>)
* display log details
* logs have fields now
* sort field keys (see the sketch after this list)
* fix link
* fix collection filtering
* avoid hard coded ec shard counts

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
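The task-log items above ("read task logs from worker", "logs have fields now", "sort field keys") add structured fields to task log entries and render them in a stable key order. A minimal, self-contained sketch of that rendering idea, using a hypothetical taskLogEntry type (the real type and field names in this PR may differ):

package main

import (
	"fmt"
	"sort"
)

// Hypothetical shape of a task log entry with structured fields, for
// illustration only. Map iteration order in Go is randomized, so the
// keys are sorted before rendering to keep the admin UI output stable.
type taskLogEntry struct {
	Message string
	Fields  map[string]string
}

func renderFields(e taskLogEntry) {
	keys := make([]string, 0, len(e.Fields))
	for k := range e.Fields {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s=%s ", k, e.Fields[k])
	}
	fmt.Println(e.Message)
}

func main() {
	renderFields(taskLogEntry{
		Message: "ec shard copy finished",
		Fields:  map[string]string{"volume_id": "42", "shard_id": "3", "server": "dc1-rack2-node5"},
	})
}

The hunks below correspond to the last two items, "fix collection filtering" and "avoid hard coded ec shard counts", in GetClusterVolumes and GetClusterVolumeServers.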
@@ -83,13 +83,7 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
 		var filteredEcTotalSize int64

 		for _, volume := range volumes {
-			// Handle "default" collection filtering for empty collections
-			volumeCollection := volume.Collection
-			if volumeCollection == "" {
-				volumeCollection = "default"
-			}
-
-			if volumeCollection == collection {
+			if matchesCollection(volume.Collection, collection) {
 				filteredVolumes = append(filteredVolumes, volume)
 				filteredTotalSize += int64(volume.Size)
 			}
@@ -103,13 +97,7 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
 			for _, node := range rack.DataNodeInfos {
 				for _, diskInfo := range node.DiskInfos {
 					for _, ecShardInfo := range diskInfo.EcShardInfos {
-						// Handle "default" collection filtering for empty collections
-						ecCollection := ecShardInfo.Collection
-						if ecCollection == "" {
-							ecCollection = "default"
-						}
-
-						if ecCollection == collection {
+						if matchesCollection(ecShardInfo.Collection, collection) {
							// Add all shard sizes for this EC volume
							for _, shardSize := range ecShardInfo.ShardSizes {
								filteredEcTotalSize += shardSize
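Both hunks replace the inline "default"-collection handling with a call to matchesCollection. The helper itself is added elsewhere in the PR and is not visible in this excerpt; based on the logic that was removed, it presumably looks something like the following sketch (package name and signature assumed):

package dash // package name assumed for this sketch

// matchesCollection mirrors the inline logic removed above: a volume
// (or EC shard) with an empty collection name is treated as belonging
// to the "default" collection before comparing against the requested
// filter.
func matchesCollection(volumeCollection, filterCollection string) bool {
	if volumeCollection == "" {
		volumeCollection = "default"
	}
	return volumeCollection == filterCollection
}

Centralizing the check keeps the regular-volume and EC-shard filtering paths consistent, which appears to be what the "fix collection filtering" item refers to.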
@@ -500,7 +488,7 @@ func (s *AdminServer) GetClusterVolumeServers() (*ClusterVolumeServersData, erro
 				ecInfo.EcIndexBits |= ecShardInfo.EcIndexBits

 				// Collect shard sizes from this disk
-				shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+				shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
 				shardBits.EachSetIndex(func(shardId erasure_coding.ShardId) {
 					if size, found := erasure_coding.GetShardSize(ecShardInfo, shardId); found {
 						allShardSizes[shardId] = size
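The last hunk sizes EC shards by iterating the EcIndexBits bitmask with EachSetIndex instead of assuming a fixed number of shards, matching the "avoid hard coded ec shard counts" item. A self-contained sketch of that iteration pattern, using simplified stand-ins for the erasure_coding types (the real ShardBits and ShardId live in weed/storage/erasure_coding and may differ in detail):

package main

import "fmt"

// Simplified stand-ins for the erasure_coding types used in the hunk
// above, for illustration only.
type ShardId uint8
type ShardBits uint32 // bit i set => shard i is present on this disk

// EachSetIndex visits every shard id whose bit is set, without assuming
// how many shards an EC volume has.
func (b ShardBits) EachSetIndex(fn func(id ShardId)) {
	for i := ShardId(0); i < 32; i++ {
		if b&(1<<i) != 0 {
			fn(i)
		}
	}
}

func main() {
	bits := ShardBits(0b1010_0000_0101) // shards 0, 2, 9 and 11 present
	bits.EachSetIndex(func(id ShardId) {
		fmt.Println("shard present:", id)
	})
}

Iterating only the set bits keeps the accounting correct even when a volume is encoded with a layout other than the default 10 data + 4 parity shards.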