seaweedFS/unmaintained/stress/ec_stress_runner.go
Chris Lu 8ec9ff4a12 Refactor plugin system and migrate worker runtime (#8369)
* admin: add plugin runtime UI page and route wiring

* pb: add plugin gRPC contract and generated bindings

* admin/plugin: implement worker registry, runtime, monitoring, and config store

* admin/dash: wire plugin runtime and expose plugin workflow APIs

* command: add flags to enable plugin runtime

* admin: rename remaining plugin v2 wording to plugin

* admin/plugin: add detectable job type registry helper

* admin/plugin: add scheduled detection and dispatch orchestration

* admin/plugin: prefetch job type descriptors when workers connect

* admin/plugin: add known job type discovery API and UI

* admin/plugin: refresh design doc to match current implementation

* admin/plugin: enforce per-worker scheduler concurrency limits

* admin/plugin: use descriptor runtime defaults for scheduler policy

* admin/ui: auto-load first known plugin job type on page open

* admin/plugin: bootstrap persisted config from descriptor defaults

* admin/plugin: dedupe scheduled proposals by dedupe key

* admin/ui: add job type and state filters for plugin monitoring

* admin/ui: add per-job-type plugin activity summary

* admin/plugin: split descriptor read API from schema refresh

* admin/ui: keep plugin summary metrics global while tables are filtered

* admin/plugin: retry executor reservation before timing out

* admin/plugin: expose scheduler states for monitoring

* admin/ui: show per-job-type scheduler states in plugin monitor

* pb/plugin: rename protobuf package to plugin

* admin/plugin: rename pluginRuntime wiring to plugin

* admin/plugin: remove runtime naming from plugin APIs and UI

* admin/plugin: rename runtime files to plugin naming

* admin/plugin: persist jobs and activities for monitor recovery

* admin/plugin: lease one detector worker per job type

* admin/ui: show worker load from plugin heartbeats

* admin/plugin: skip stale workers for detector and executor picks

* plugin/worker: add plugin worker command and stream runtime scaffold

* plugin/worker: implement vacuum detect and execute handlers

* admin/plugin: document external vacuum plugin worker starter

* command: update plugin.worker help to reflect implemented flow

* command/admin: drop legacy Plugin V2 label

* plugin/worker: validate vacuum job type and respect min interval

* plugin/worker: test no-op detect when min interval not elapsed

* command/admin: document plugin.worker external process

* plugin/worker: advertise configured concurrency in hello

* command/plugin.worker: add jobType handler selection

* command/plugin.worker: test handler selection by job type

* command/plugin.worker: persist worker id in workingDir

* admin/plugin: document plugin.worker jobType and workingDir flags

* plugin/worker: support cancel request for in-flight work

* plugin/worker: test cancel request acknowledgements

* command/plugin.worker: document workingDir and jobType behavior

* plugin/worker: emit executor activity events for monitor

* plugin/worker: test executor activity builder

* admin/plugin: send last successful run in detection request

* admin/plugin: send cancel request when detect or execute context ends

* admin/plugin: document worker cancel request responsibility

* admin/handlers: expose plugin scheduler states API in no-auth mode

* admin/handlers: test plugin scheduler states route registration

* admin/plugin: keep worker id on worker-generated activity records

* admin/plugin: test worker id propagation in monitor activities

* admin/dash: always initialize plugin service

* command/admin: remove plugin enable flags and default to enabled

* admin/dash: drop pluginEnabled constructor parameter

* admin/plugin UI: stop checking plugin enabled state

* admin/plugin: remove docs for plugin enable flags

* admin/dash: remove unused plugin enabled check method

* admin/dash: fallback to in-memory plugin init when dataDir fails

* admin/plugin API: expose worker gRPC port in status

* command/plugin.worker: resolve admin gRPC port via plugin status

* split plugin UI into overview/configuration/monitoring pages

* Update layout_templ.go

* add volume_balance plugin worker handler

* wire plugin.worker CLI for volume_balance job type

* add erasure_coding plugin worker handler

* wire plugin.worker CLI for erasure_coding job type

* support multi-job handlers in plugin worker runtime

* allow plugin.worker jobType as comma-separated list

* admin/plugin UI: rename to Workers and simplify config view

* plugin worker: queue detection requests instead of capacity reject

* Update plugin_worker.go

* plugin volume_balance: remove force_move/timeout from worker config UI

* plugin erasure_coding: enforce local working dir and cleanup

* admin/plugin UI: rename admin settings to job scheduling

* admin/plugin UI: persist and robustly render detection results

* admin/plugin: record and return detection trace metadata

* admin/plugin UI: show detection process and decision trace

* plugin: surface detector decision trace as activities

* mini: start a plugin worker by default

* admin/plugin UI: split monitoring into detection and execution tabs

* plugin worker: emit detection decision trace for EC and balance

* admin workers UI: split monitoring into detection and execution pages

* plugin scheduler: skip proposals for active assigned/running jobs

* admin workers UI: add job queue tab

* plugin worker: add dummy stress detector and executor job type

* admin workers UI: reorder tabs to detection queue execution

* admin workers UI: regenerate plugin template

* plugin defaults: include dummy stress and add stress tests

* plugin dummy stress: rotate detection selections across runs

* plugin scheduler: remove cross-run proposal dedupe

* plugin queue: track pending scheduled jobs

* plugin scheduler: wait for executor capacity before dispatch

* plugin scheduler: skip detection when waiting backlog is high

* plugin: add disk-backed job detail API and persistence

* admin ui: show plugin job detail modal from job id links

* plugin: generate unique job ids instead of reusing proposal ids

* plugin worker: emit heartbeats on work state changes

* plugin registry: round-robin tied executor and detector picks

* add temporary EC overnight stress runner

* plugin job details: persist and render EC execution plans

* ec volume details: color data and parity shard badges

* shard labels: keep parity ids numeric and color-only distinction

* admin: remove legacy maintenance UI routes and templates

* admin: remove dead maintenance endpoint helpers

* Update layout_templ.go

* remove dummy_stress worker and command support

* refactor plugin UI to job-type top tabs and sub-tabs

* migrate weed worker command to plugin runtime

* remove plugin.worker command and keep worker runtime with metrics

* update helm worker args for jobType and execution flags

* set plugin scheduling defaults to global 16 and per-worker 4

* stress: fix RPC context reuse and remove redundant variables in ec_stress_runner

* admin/plugin: fix lifecycle races, safe channel operations, and terminal state constants

* admin/dash: randomize job IDs and fix priority zero-value overwrite in plugin API

* admin/handlers: implement buffered rendering to prevent response corruption

* admin/plugin: implement debounced persistence flusher and optimize BuildJobDetail memory lookups
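
  A debounced flusher in this spirit can be built on time.AfterFunc; the names below (debouncedFlusher, markDirty, persist) are illustrative sketches, not the actual admin/plugin types:

    type debouncedFlusher struct {
        mu      sync.Mutex
        timer   *time.Timer
        persist func() // the real write-to-disk callback is assumed here
    }

    // markDirty restarts the countdown so bursts of updates collapse into one persist call.
    func (f *debouncedFlusher) markDirty(delay time.Duration) {
        f.mu.Lock()
        defer f.mu.Unlock()
        if f.timer != nil {
            f.timer.Stop()
        }
        f.timer = time.AfterFunc(delay, f.persist)
    }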

* admin/plugin: fix priority overwrite and implement bounded wait in scheduler reserve

* admin/plugin: implement atomic file writes and fix run record side effects

* admin/plugin: use P prefix for parity shard labels in execution plans

* admin/plugin: enable parallel execution for cancellation tests

* admin: refactor time.Time fields to pointers for better JSON omitempty support

* admin/plugin: implement pointer-safe time assignments and comparisons in plugin core

* admin/plugin: fix time assignment and sorting logic in plugin monitor after pointer refactor

* admin/plugin: update scheduler activity tracking to use time pointers

* admin/plugin: fix time-based run history trimming after pointer refactor

* admin/dash: fix JobSpec struct literal in plugin API after pointer refactor

* admin/view: add D/P prefixes to EC shard badges for UI consistency

* admin/plugin: use lifecycle-aware context for schema prefetching

* Update ec_volume_details_templ.go

* admin/stress: fix proposal sorting and log volume cleanup errors

* stress: refine ec stress runner with math/rand and collection name

- Added Collection field to VolumeEcShardsDeleteRequest for correct filename construction.
- Replaced crypto/rand with seeded math/rand PRNG for bulk payloads.
- Added documentation for EcMinAge zero-value behavior.
- Added logging for ignored errors in volume/shard deletion.

* admin: return internal server error for plugin store failures

Changed error status code from 400 Bad Request to 500 Internal Server Error for failures in GetPluginJobDetail to correctly reflect server-side errors.

* admin: implement safe channel sends and graceful shutdown sync

- Added sync.WaitGroup to Plugin struct to manage background goroutines.
- Implemented safeSendCh helper using recover() to prevent panics on closed channels.
- Ensured Shutdown() waits for all background operations to complete.
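
  A minimal sketch of the send-guard pattern described above, with illustrative names; the actual safeSendCh signature and Plugin fields in admin/plugin may differ:

    func safeSendCh[T any](ch chan<- T, value T) (sent bool) {
        defer func() {
            if recover() != nil {
                sent = false // the channel was closed while we were sending
            }
        }()
        ch <- value
        return true
    }

    // Shutdown then closes the channel once and waits for background goroutines,
    // roughly: close(p.shutdownCh); p.wg.Wait()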

* admin: robustify plugin monitor with nil-safe time and record init

- Standardized nil-safe assignment for *time.Time pointers (CreatedAt, UpdatedAt, CompletedAt).
- Ensured persistJobDetailSnapshot initializes new records correctly if they don't exist on disk.
- Fixed debounced persistence to trigger immediate write on job completion.
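
  The nil-safe pointer handling can be illustrated as follows; timeToPtr is the helper named later in this message, cloneTime is illustrative only:

    func timeToPtr(t time.Time) *time.Time { return &t }

    func cloneTime(t *time.Time) *time.Time {
        if t == nil {
            return nil
        }
        c := *t
        return &c
    }

    // With pointer fields, a tag like `json:"completedAt,omitempty"` actually omits
    // unset times, which a zero-valued time.Time never does.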

* admin: improve scheduler shutdown behavior and logic guards

- Replaced brittle error string matching with explicit r.shutdownCh selection for shutdown detection.
- Removed redundant nil guard in buildScheduledJobSpec.
- Standardized WaitGroup usage for schedulerLoop.
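
  Shutdown detection via the channel rather than error text, sketched as a fragment with assumed field names:

    select {
    case <-r.shutdownCh:
        return nil // shutting down: stop quietly instead of matching error strings
    default:
    }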

* admin: implement deep copy for job parameters and atomic write fixes

- Implemented deepCopyGenericValue and used it in cloneTrackedJob to prevent shared state.
- Ensured atomicWriteFile creates parent directories before writing.
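
  One way atomicWriteFile can behave as described (parent directory first, then temp file plus rename); the permissions and temp-name scheme here are assumptions:

    func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
        if err := os.MkdirAll(filepath.Dir(filename), 0o755); err != nil {
            return err
        }
        tmp := filename + ".tmp"
        if err := os.WriteFile(tmp, data, perm); err != nil {
            return err
        }
        return os.Rename(tmp, filename) // rename is atomic on the same filesystem
    }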

* admin: remove unreachable branch in shard classification

Removed an unreachable 'totalShards <= 0' check in classifyShardID as dataShards and parityShards are already guarded.

* admin: secure UI links and use canonical shard constants

- Added rel="noopener noreferrer" to external links for security.
- Replaced magic number 14 with erasure_coding.TotalShardsCount.
- Used renderEcShardBadge for missing shard list consistency.

* admin: stabilize plugin tests and fix regressions

- Composed a robust plugin_monitor_test.go to handle asynchronous persistence.
- Updated all time.Time literals to use timeToPtr helper.
- Added explicit Shutdown() calls in tests to synchronize with debounced writes.
- Fixed syntax errors and orphaned struct literals in tests.

* Potential fix for code scanning alert no. 278: Slice memory allocation with excessive size value

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 283: Uncontrolled data used in path expression

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* admin: finalize refinements for error handling, scheduler, and race fixes

- Standardized HTTP 500 status codes for store failures in plugin_api.go.
- Tracked scheduled detection goroutines with sync.WaitGroup for safe shutdown.
- Fixed race condition in safeSendDetectionComplete by extracting channel under lock.
- Implemented deep copy for JobActivity details.
- Used defaultDirPerm constant in atomicWriteFile.
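
  The race fix in safeSendDetectionComplete follows a standard shape: read the channel field while holding the lock, send after releasing it. This fragment uses illustrative field names, not the real ones:

    r.mu.Lock()
    ch := r.detectionCompleteCh
    r.mu.Unlock()
    if ch != nil {
        ch <- result // the send itself happens outside the lock
    }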

* test(ec): migrate admin dockertest to plugin APIs

* admin/plugin_api: fix RunPluginJobTypeAPI to return 500 for server-side detection/filter errors

* admin/plugin_api: fix ExecutePluginJobAPI to return 500 for job execution failures

* admin/plugin_api: limit parseProtoJSONBody request body to 1MB to prevent unbounded memory usage
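
  The cap is the usual http.MaxBytesReader guard; shown here as a fragment with plain net/http types, while parseProtoJSONBody itself may take different arguments:

    req.Body = http.MaxBytesReader(w, req.Body, 1<<20) // 1 MiB cap
    payload, err := io.ReadAll(req.Body)               // oversized bodies now fail here
    if err != nil {
        return err
    }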

* admin/plugin: consolidate regex to package-level validJobTypePattern; add char validation to sanitizeJobID
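
  Package-level compilation avoids recompiling the expression on every call; the pattern and the wrapper name below are placeholders, not the real validJobTypePattern rule:

    var validJobTypePattern = regexp.MustCompile(`^[a-z][a-z0-9_]*$`)

    func isValidJobType(jobType string) bool {
        return validJobTypePattern.MatchString(jobType)
    }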

* admin/plugin: fix racy Shutdown channel close with sync.Once
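
  Closing the shutdown channel through sync.Once makes repeated or concurrent Shutdown calls safe; the field names come from the notes above, the rest is assumed:

    type Plugin struct {
        shutdownCh   chan struct{}
        shutdownOnce sync.Once
    }

    func (p *Plugin) Shutdown() {
        p.shutdownOnce.Do(func() { close(p.shutdownCh) })
    }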

* admin/plugin: track sendLoop and recv goroutines in WorkerStream with r.wg

* admin/plugin: document writeProtoFiles atomicity — .pb is source of truth, .json is human-readable only

* admin/plugin: extract activityLess helper to deduplicate nil-safe OccurredAt sort comparators
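
  A nil-safe comparator of the kind activityLess deduplicates, shown over bare *time.Time values; the real helper likely compares activity records, and nil-first ordering is an assumption:

    func occurredBefore(a, b *time.Time) bool {
        switch {
        case a == nil && b == nil:
            return false
        case a == nil:
            return true
        case b == nil:
            return false
        default:
            return a.Before(*b)
        }
    }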

* test/ec: check http.NewRequest errors to prevent nil req panics

* test/ec: replace deprecated ioutil/math/rand, fix stale step comment 5.1→3.1

* plugin(ec): raise default detection and scheduling throughput limits

* topology: include empty disks in volume list and EC capacity fallback

* topology: remove hard 10-task cap for detection planning

* Update ec_volume_details_templ.go

* adjust default

* fix tests

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2026-02-18 13:42:41 -08:00


package main

import (
	"context"
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	mrand "math/rand"
	"net/http"
	"net/url"
	"os/signal"
	"path"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// config holds the runtime settings parsed from command-line flags.
type config struct {
	MasterAddresses    []string
	FilerURL           string
	PathPrefix         string
	Collection         string
	FileSizeBytes      int64
	BatchSize          int
	WriteInterval      time.Duration
	CleanupInterval    time.Duration
	EcMinAge           time.Duration
	MaxCleanupPerCycle int
	RequestTimeout     time.Duration
	MaxRuntime         time.Duration
	DryRun             bool
}

// runner drives the write and cleanup cycles and tracks when EC volumes were first observed.
type runner struct {
	cfg            config
	httpClient     *http.Client
	grpcDialOption grpc.DialOption
	mu             sync.Mutex
	sequence       int64
	ecFirstSeenAt  map[uint32]time.Time
	rng            *mrand.Rand
}

// ecVolumeInfo records which servers hold which shards of one EC volume.
type ecVolumeInfo struct {
	Collection string
	NodeShards map[pb.ServerAddress][]uint32
}

// ecCleanupCandidate is an EC volume eligible for deletion in the current cleanup cycle.
type ecCleanupCandidate struct {
	VolumeID    uint32
	FirstSeenAt time.Time
	Info        *ecVolumeInfo
}

func main() {
	cfg, err := loadConfig()
	if err != nil {
		log.Fatalf("invalid flags: %v", err)
	}

	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	if cfg.MaxRuntime > 0 {
		runCtx, cancel := context.WithTimeout(ctx, cfg.MaxRuntime)
		defer cancel()
		ctx = runCtx
	}

	r := &runner{
		cfg:            cfg,
		httpClient:     &http.Client{Timeout: cfg.RequestTimeout},
		grpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()),
		ecFirstSeenAt:  make(map[uint32]time.Time),
		rng:            mrand.New(mrand.NewSource(time.Now().UnixNano())),
	}

	log.Printf(
		"starting EC stress runner: masters=%s filer=%s prefix=%s collection=%s file_size=%d batch=%d write_interval=%s cleanup_interval=%s ec_min_age=%s max_cleanup=%d dry_run=%v",
		strings.Join(cfg.MasterAddresses, ","),
		cfg.FilerURL,
		cfg.PathPrefix,
		cfg.Collection,
		cfg.FileSizeBytes,
		cfg.BatchSize,
		cfg.WriteInterval,
		cfg.CleanupInterval,
		cfg.EcMinAge,
		cfg.MaxCleanupPerCycle,
		cfg.DryRun,
	)

	if err := r.run(ctx); err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
		log.Fatalf("runner stopped with error: %v", err)
	}
	log.Printf("runner stopped")
}

// loadConfig parses and validates the command-line flags.
func loadConfig() (config, error) {
	var masters string
	cfg := config{}
	flag.StringVar(&masters, "masters", "127.0.0.1:9333", "comma-separated master server addresses")
	flag.StringVar(&cfg.FilerURL, "filer", "http://127.0.0.1:8888", "filer base URL")
	flag.StringVar(&cfg.PathPrefix, "path_prefix", "/tmp/ec-stress", "filer path prefix for generated files")
	flag.StringVar(&cfg.Collection, "collection", "ec_stress", "target collection for stress data")
	fileSizeMB := flag.Int("file_size_mb", 8, "size per generated file in MB")
	flag.IntVar(&cfg.BatchSize, "batch_size", 4, "files generated per write cycle")
	flag.DurationVar(&cfg.WriteInterval, "write_interval", 5*time.Second, "interval between write cycles")
	flag.DurationVar(&cfg.CleanupInterval, "cleanup_interval", 2*time.Minute, "interval between EC cleanup cycles")
	flag.DurationVar(&cfg.EcMinAge, "ec_min_age", 30*time.Minute, "minimum observed EC age before deletion")
	flag.IntVar(&cfg.MaxCleanupPerCycle, "max_cleanup_per_cycle", 4, "maximum EC volumes deleted per cleanup cycle")
	flag.DurationVar(&cfg.RequestTimeout, "request_timeout", 20*time.Second, "HTTP/gRPC request timeout")
	flag.DurationVar(&cfg.MaxRuntime, "max_runtime", 0, "maximum run duration; 0 means run until interrupted")
	flag.BoolVar(&cfg.DryRun, "dry_run", false, "log actions without deleting EC shards")
	flag.Parse()

	cfg.MasterAddresses = splitNonEmpty(masters)
	cfg.FileSizeBytes = int64(*fileSizeMB) * 1024 * 1024
	if len(cfg.MasterAddresses) == 0 {
		return cfg, fmt.Errorf("at least one master is required")
	}
	if cfg.FileSizeBytes <= 0 {
		return cfg, fmt.Errorf("file_size_mb must be positive")
	}
	if cfg.BatchSize <= 0 {
		return cfg, fmt.Errorf("batch_size must be positive")
	}
	if cfg.WriteInterval <= 0 {
		return cfg, fmt.Errorf("write_interval must be positive")
	}
	if cfg.CleanupInterval <= 0 {
		return cfg, fmt.Errorf("cleanup_interval must be positive")
	}
	if cfg.EcMinAge < 0 {
		return cfg, fmt.Errorf("ec_min_age must be zero or positive")
	}
	// Note: EcMinAge == 0 intentionally disables the age guard, making EC volumes eligible for cleanup immediately.
	if cfg.MaxCleanupPerCycle <= 0 {
		return cfg, fmt.Errorf("max_cleanup_per_cycle must be positive")
	}
	if cfg.RequestTimeout <= 0 {
		return cfg, fmt.Errorf("request_timeout must be positive")
	}

	cfg.PathPrefix = ensureLeadingSlash(strings.TrimSpace(cfg.PathPrefix))
	cfg.Collection = strings.TrimSpace(cfg.Collection)
	cfg.FilerURL = strings.TrimRight(strings.TrimSpace(cfg.FilerURL), "/")
	if cfg.FilerURL == "" {
		return cfg, fmt.Errorf("filer URL is required")
	}
	if _, err := url.ParseRequestURI(cfg.FilerURL); err != nil {
		return cfg, fmt.Errorf("invalid filer URL %q: %w", cfg.FilerURL, err)
	}
	return cfg, nil
}

// run performs an initial write and cleanup pass, then loops on the tickers until ctx is done.
func (r *runner) run(ctx context.Context) error {
	writeTicker := time.NewTicker(r.cfg.WriteInterval)
	defer writeTicker.Stop()
	cleanupTicker := time.NewTicker(r.cfg.CleanupInterval)
	defer cleanupTicker.Stop()

	r.runWriteCycle(ctx)
	r.runCleanupCycle(ctx)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-writeTicker.C:
			r.runWriteCycle(ctx)
		case <-cleanupTicker.C:
			r.runCleanupCycle(ctx)
		}
	}
}

// runWriteCycle uploads BatchSize files, logging individual failures without aborting the cycle.
func (r *runner) runWriteCycle(ctx context.Context) {
	for i := 0; i < r.cfg.BatchSize; i++ {
		if ctx.Err() != nil {
			return
		}
		if err := r.uploadOneFile(ctx); err != nil {
			log.Printf("upload failed: %v", err)
		}
	}
}

// uploadOneFile PUTs one pseudo-random payload of FileSizeBytes to the filer under PathPrefix.
func (r *runner) uploadOneFile(ctx context.Context) error {
	sequence := r.nextSequence()
	filePath := path.Join(r.cfg.PathPrefix, fmt.Sprintf("ec-stress-%d-%d.bin", time.Now().UnixNano(), sequence))
	fileURL := r.cfg.FilerURL + filePath
	if r.cfg.Collection != "" {
		fileURL += "?collection=" + url.QueryEscape(r.cfg.Collection)
	}

	uploadCtx, cancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
	defer cancel()
	body := io.LimitReader(r.rng, r.cfg.FileSizeBytes)
	request, err := http.NewRequestWithContext(uploadCtx, http.MethodPut, fileURL, body)
	if err != nil {
		return err
	}
	request.ContentLength = r.cfg.FileSizeBytes
	request.Header.Set("Content-Type", "application/octet-stream")

	response, err := r.httpClient.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	// Drain the body so the connection can be reused; the copy error is intentionally ignored.
	_, _ = io.Copy(io.Discard, response.Body)
	if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusMultipleChoices {
		return fmt.Errorf("upload %s returned %s", filePath, response.Status)
	}
	log.Printf("uploaded %s size=%d", filePath, r.cfg.FileSizeBytes)
	return nil
}

// runCleanupCycle lists EC volumes from the masters and deletes aged candidates, up to MaxCleanupPerCycle.
func (r *runner) runCleanupCycle(ctx context.Context) {
	volumeList, err := r.fetchVolumeList(ctx)
	if err != nil {
		log.Printf("cleanup skipped: fetch volume list failed: %v", err)
		return
	}
	if volumeList == nil || volumeList.TopologyInfo == nil {
		log.Printf("cleanup skipped: topology is empty")
		return
	}

	ecVolumes := collectEcVolumes(volumeList.TopologyInfo, r.cfg.Collection)
	candidates := r.selectCleanupCandidates(ecVolumes)
	if len(candidates) == 0 {
		log.Printf("cleanup: no EC volume candidate aged >= %s in collection=%q", r.cfg.EcMinAge, r.cfg.Collection)
		return
	}

	log.Printf("cleanup: deleting up to %d EC volumes (found=%d)", r.cfg.MaxCleanupPerCycle, len(candidates))
	deleted := 0
	for _, candidate := range candidates {
		if ctx.Err() != nil {
			return
		}
		if r.cfg.DryRun {
			log.Printf(
				"cleanup dry-run: would delete EC volume=%d collection=%q seen_for=%s nodes=%d",
				candidate.VolumeID,
				candidate.Info.Collection,
				time.Since(candidate.FirstSeenAt).Round(time.Second),
				len(candidate.Info.NodeShards),
			)
			continue
		}
		if err := r.deleteEcVolume(ctx, candidate.VolumeID, candidate.Info); err != nil {
			log.Printf("cleanup volume=%d failed: %v", candidate.VolumeID, err)
			continue
		}
		deleted++
		r.mu.Lock()
		delete(r.ecFirstSeenAt, candidate.VolumeID)
		r.mu.Unlock()
		log.Printf("cleanup volume=%d completed", candidate.VolumeID)
	}
	log.Printf("cleanup finished: deleted=%d attempted=%d", deleted, len(candidates))
}

// fetchVolumeList tries each configured master in order and returns the first successful VolumeList response.
func (r *runner) fetchVolumeList(ctx context.Context) (*master_pb.VolumeListResponse, error) {
	var lastErr error
	for _, master := range r.cfg.MasterAddresses {
		masterAddress := strings.TrimSpace(master)
		if masterAddress == "" {
			continue
		}
		var response *master_pb.VolumeListResponse
		err := pb.WithMasterClient(false, pb.ServerAddress(masterAddress), r.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
			callCtx, cancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
			defer cancel()
			resp, callErr := client.VolumeList(callCtx, &master_pb.VolumeListRequest{})
			if callErr != nil {
				return callErr
			}
			response = resp
			return nil
		})
		if err == nil {
			return response, nil
		}
		lastErr = err
	}
	if lastErr == nil {
		lastErr = fmt.Errorf("no valid master address")
	}
	return nil, lastErr
}

// collectEcVolumes walks the topology and groups EC shard ids by volume and by server,
// optionally filtering by collection.
func collectEcVolumes(topology *master_pb.TopologyInfo, collection string) map[uint32]*ecVolumeInfo {
	normalizedCollection := strings.TrimSpace(collection)
	volumeShardSets := make(map[uint32]map[pb.ServerAddress]map[uint32]struct{})
	volumeCollection := make(map[uint32]string)
	for _, dc := range topology.GetDataCenterInfos() {
		for _, rack := range dc.GetRackInfos() {
			for _, node := range rack.GetDataNodeInfos() {
				server := pb.NewServerAddressFromDataNode(node)
				for _, disk := range node.GetDiskInfos() {
					for _, shardInfo := range disk.GetEcShardInfos() {
						if shardInfo == nil || shardInfo.Id == 0 {
							continue
						}
						if normalizedCollection != "" && strings.TrimSpace(shardInfo.Collection) != normalizedCollection {
							continue
						}
						shards := erasure_coding.ShardsInfoFromVolumeEcShardInformationMessage(shardInfo).IdsUint32()
						if len(shards) == 0 {
							continue
						}
						perVolume := volumeShardSets[shardInfo.Id]
						if perVolume == nil {
							perVolume = make(map[pb.ServerAddress]map[uint32]struct{})
							volumeShardSets[shardInfo.Id] = perVolume
						}
						perNode := perVolume[server]
						if perNode == nil {
							perNode = make(map[uint32]struct{})
							perVolume[server] = perNode
						}
						for _, shardID := range shards {
							perNode[shardID] = struct{}{}
						}
						volumeCollection[shardInfo.Id] = shardInfo.Collection
					}
				}
			}
		}
	}

	result := make(map[uint32]*ecVolumeInfo, len(volumeShardSets))
	for volumeID, perNode := range volumeShardSets {
		info := &ecVolumeInfo{
			Collection: volumeCollection[volumeID],
			NodeShards: make(map[pb.ServerAddress][]uint32, len(perNode)),
		}
		for server, shardSet := range perNode {
			shardIDs := make([]uint32, 0, len(shardSet))
			for shardID := range shardSet {
				shardIDs = append(shardIDs, shardID)
			}
			sort.Slice(shardIDs, func(i, j int) bool { return shardIDs[i] < shardIDs[j] })
			info.NodeShards[server] = shardIDs
		}
		result[volumeID] = info
	}
	return result
}

// selectCleanupCandidates tracks when each EC volume was first observed and returns the oldest
// volumes that have been visible for at least EcMinAge, capped at MaxCleanupPerCycle.
func (r *runner) selectCleanupCandidates(ecVolumes map[uint32]*ecVolumeInfo) []ecCleanupCandidate {
	now := time.Now()
	r.mu.Lock()
	defer r.mu.Unlock()
	for volumeID := range ecVolumes {
		if _, exists := r.ecFirstSeenAt[volumeID]; !exists {
			r.ecFirstSeenAt[volumeID] = now
		}
	}
	// Forget volumes that are no longer present in the topology.
	for volumeID := range r.ecFirstSeenAt {
		if _, exists := ecVolumes[volumeID]; !exists {
			delete(r.ecFirstSeenAt, volumeID)
		}
	}

	candidates := make([]ecCleanupCandidate, 0, len(ecVolumes))
	for volumeID, info := range ecVolumes {
		firstSeenAt := r.ecFirstSeenAt[volumeID]
		if r.cfg.EcMinAge > 0 && now.Sub(firstSeenAt) < r.cfg.EcMinAge {
			continue
		}
		candidates = append(candidates, ecCleanupCandidate{
			VolumeID:    volumeID,
			FirstSeenAt: firstSeenAt,
			Info:        info,
		})
	}
	sort.Slice(candidates, func(i, j int) bool {
		if candidates[i].FirstSeenAt.Equal(candidates[j].FirstSeenAt) {
			return candidates[i].VolumeID < candidates[j].VolumeID
		}
		return candidates[i].FirstSeenAt.Before(candidates[j].FirstSeenAt)
	})
	if len(candidates) > r.cfg.MaxCleanupPerCycle {
		candidates = candidates[:r.cfg.MaxCleanupPerCycle]
	}
	return candidates
}

// deleteEcVolume unmounts and deletes EC shards on every server that holds them, then asks each
// server to delete any remaining volume files. Unmount and final volume-delete errors are only
// logged; shard deletion errors count toward the per-server failure accounting.
func (r *runner) deleteEcVolume(ctx context.Context, volumeID uint32, info *ecVolumeInfo) error {
	if info == nil {
		return fmt.Errorf("ec volume %d has no topology info", volumeID)
	}
	failureCount := 0
	for server, shardIDs := range info.NodeShards {
		err := pb.WithVolumeServerClient(false, server, r.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
			unmountCtx, unmountCancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
			defer unmountCancel()
			if _, err := client.VolumeEcShardsUnmount(unmountCtx, &volume_server_pb.VolumeEcShardsUnmountRequest{
				VolumeId: volumeID,
				ShardIds: shardIDs,
			}); err != nil {
				log.Printf("volume %d ec shards unmount on %s failed: %v", volumeID, server, err)
			}
			if len(shardIDs) > 0 {
				deleteCtx, deleteCancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
				defer deleteCancel()
				if _, err := client.VolumeEcShardsDelete(deleteCtx, &volume_server_pb.VolumeEcShardsDeleteRequest{
					VolumeId:   volumeID,
					Collection: r.cfg.Collection,
					ShardIds:   shardIDs,
				}); err != nil {
					return err
				}
			}
			finalDeleteCtx, finalDeleteCancel := context.WithTimeout(ctx, r.cfg.RequestTimeout)
			defer finalDeleteCancel()
			if _, err := client.VolumeDelete(finalDeleteCtx, &volume_server_pb.VolumeDeleteRequest{
				VolumeId: volumeID,
			}); err != nil {
				log.Printf("volume %d delete on %s failed: %v", volumeID, server, err)
			}
			return nil
		})
		if err != nil {
			failureCount++
			log.Printf("cleanup volume=%d server=%s shards=%v failed: %v", volumeID, server, shardIDs, err)
		}
	}
	if failureCount == len(info.NodeShards) && failureCount > 0 {
		return fmt.Errorf("all shard deletions failed for volume %d", volumeID)
	}
	if failureCount > 0 {
		return fmt.Errorf("partial shard deletion failure for volume %d", volumeID)
	}
	return nil
}

// nextSequence returns a monotonically increasing per-process sequence number.
func (r *runner) nextSequence() int64 {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.sequence++
	return r.sequence
}

// splitNonEmpty splits a comma-separated list and drops empty entries.
func splitNonEmpty(value string) []string {
	parts := strings.Split(value, ",")
	result := make([]string, 0, len(parts))
	for _, part := range parts {
		trimmed := strings.TrimSpace(part)
		if trimmed != "" {
			result = append(result, trimmed)
		}
	}
	return result
}

// ensureLeadingSlash normalizes a filer path prefix so it always starts with "/".
func ensureLeadingSlash(value string) string {
	if value == "" {
		return "/"
	}
	if strings.HasPrefix(value, "/") {
		return value
	}
	return "/" + value
}