Prune unused functions from weed/admin/dash. (#8871)
* chore(weed/admin/dash): prune unused functions
* chore(weed/admin/dash): prune test-only function
This commit is contained in:
@@ -1646,159 +1646,6 @@ func (as *AdminServer) GetConfigPersistence() *ConfigPersistence {
|
||||
return as.configPersistence
|
||||
}
|
||||
|
||||
// convertJSONToMaintenanceConfig converts JSON map to protobuf MaintenanceConfig
|
||||
func convertJSONToMaintenanceConfig(jsonConfig map[string]interface{}) (*maintenance.MaintenanceConfig, error) {
|
||||
config := &maintenance.MaintenanceConfig{}
|
||||
|
||||
// Helper function to get int32 from interface{}
|
||||
getInt32 := func(key string) (int32, error) {
|
||||
if val, ok := jsonConfig[key]; ok {
|
||||
switch v := val.(type) {
|
||||
case int:
|
||||
return int32(v), nil
|
||||
case int32:
|
||||
return v, nil
|
||||
case int64:
|
||||
return int32(v), nil
|
||||
case float64:
|
||||
return int32(v), nil
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid type for %s: expected number, got %T", key, v)
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Helper function to get bool from interface{}
|
||||
getBool := func(key string) bool {
|
||||
if val, ok := jsonConfig[key]; ok {
|
||||
if b, ok := val.(bool); ok {
|
||||
return b
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
// Convert basic fields
|
||||
config.Enabled = getBool("enabled")
|
||||
|
||||
if config.ScanIntervalSeconds, err = getInt32("scan_interval_seconds"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.WorkerTimeoutSeconds, err = getInt32("worker_timeout_seconds"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.TaskTimeoutSeconds, err = getInt32("task_timeout_seconds"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.RetryDelaySeconds, err = getInt32("retry_delay_seconds"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.MaxRetries, err = getInt32("max_retries"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.CleanupIntervalSeconds, err = getInt32("cleanup_interval_seconds"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.TaskRetentionSeconds, err = getInt32("task_retention_seconds"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert policy if present
|
||||
if policyData, ok := jsonConfig["policy"]; ok {
|
||||
if policyMap, ok := policyData.(map[string]interface{}); ok {
|
||||
policy := &maintenance.MaintenancePolicy{}
|
||||
|
||||
if globalMaxConcurrent, err := getInt32FromMap(policyMap, "global_max_concurrent"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
policy.GlobalMaxConcurrent = globalMaxConcurrent
|
||||
}
|
||||
|
||||
if defaultRepeatIntervalSeconds, err := getInt32FromMap(policyMap, "default_repeat_interval_seconds"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
policy.DefaultRepeatIntervalSeconds = defaultRepeatIntervalSeconds
|
||||
}
|
||||
|
||||
if defaultCheckIntervalSeconds, err := getInt32FromMap(policyMap, "default_check_interval_seconds"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
policy.DefaultCheckIntervalSeconds = defaultCheckIntervalSeconds
|
||||
}
|
||||
|
||||
// Convert task policies if present
|
||||
if taskPoliciesData, ok := policyMap["task_policies"]; ok {
|
||||
if taskPoliciesMap, ok := taskPoliciesData.(map[string]interface{}); ok {
|
||||
policy.TaskPolicies = make(map[string]*maintenance.TaskPolicy)
|
||||
|
||||
for taskType, taskPolicyData := range taskPoliciesMap {
|
||||
if taskPolicyMap, ok := taskPolicyData.(map[string]interface{}); ok {
|
||||
taskPolicy := &maintenance.TaskPolicy{}
|
||||
|
||||
taskPolicy.Enabled = getBoolFromMap(taskPolicyMap, "enabled")
|
||||
|
||||
if maxConcurrent, err := getInt32FromMap(taskPolicyMap, "max_concurrent"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
taskPolicy.MaxConcurrent = maxConcurrent
|
||||
}
|
||||
|
||||
if repeatIntervalSeconds, err := getInt32FromMap(taskPolicyMap, "repeat_interval_seconds"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
taskPolicy.RepeatIntervalSeconds = repeatIntervalSeconds
|
||||
}
|
||||
|
||||
if checkIntervalSeconds, err := getInt32FromMap(taskPolicyMap, "check_interval_seconds"); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
taskPolicy.CheckIntervalSeconds = checkIntervalSeconds
|
||||
}
|
||||
|
||||
policy.TaskPolicies[taskType] = taskPolicy
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
config.Policy = policy
|
||||
}
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// Helper functions for map conversion

// getInt32FromMap reads key from m and coerces the value to int32. Accepted
// value types are int, int32, int64, and float64 (the numeric type
// encoding/json decodes into). A missing key returns (0, nil); any other
// value type returns an error naming the key and the offending type.
func getInt32FromMap(m map[string]interface{}, key string) (int32, error) {
	val, ok := m[key]
	if !ok {
		return 0, nil
	}
	switch n := val.(type) {
	case int:
		return int32(n), nil
	case int32:
		return n, nil
	case int64:
		return int32(n), nil
	case float64:
		// Truncates toward zero, matching Go's int32 conversion rules.
		return int32(n), nil
	default:
		return 0, fmt.Errorf("invalid type for %s: expected number, got %T", key, n)
	}
}
|
||||
|
||||
// getBoolFromMap reads key from m and returns it as a bool. A missing key
// or a non-bool value both yield false.
func getBoolFromMap(m map[string]interface{}, key string) bool {
	// A failed assertion (missing key or wrong type) leaves b at its zero
	// value, false, which is exactly the documented fallback.
	b, _ := m[key].(bool)
	return b
}
|
||||
|
||||
type collectionStats struct {
|
||||
PhysicalSize int64
|
||||
LogicalSize int64
|
||||
|
||||
@@ -361,26 +361,6 @@ func normalizeQuotaUnit(unit string) (string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// convertBytesToQuota converts a raw byte count to a (size, unit) pair for
// display. TB or GB is used only when the count divides evenly by that unit;
// otherwise the value is reported in MB (integer division). A zero count is
// reported as 0 MB.
func convertBytesToQuota(bytes int64) (int64, string) {
	const (
		mb = int64(1024 * 1024)
		gb = 1024 * mb
		tb = 1024 * gb
	)
	switch {
	case bytes == 0:
		return 0, "MB"
	case bytes >= tb && bytes%tb == 0:
		return bytes / tb, "TB"
	case bytes >= gb && bytes%gb == 0:
		return bytes / gb, "GB"
	default:
		return bytes / mb, "MB"
	}
}
|
||||
|
||||
// SetBucketQuota sets the quota for a bucket
|
||||
func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaEnabled bool) error {
|
||||
return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
@@ -506,18 +506,6 @@ func getShardCount(ecIndexBits uint32) int {
|
||||
return count
|
||||
}
|
||||
|
||||
// getMissingShards returns a slice of missing shard IDs for a volume
|
||||
// Assumes default 10+4 EC configuration (14 total shards)
|
||||
func getMissingShards(ecIndexBits uint32) []int {
|
||||
var missing []int
|
||||
for i := 0; i < erasure_coding.TotalShardsCount; i++ {
|
||||
if (ecIndexBits & (1 << uint(i))) == 0 {
|
||||
missing = append(missing, i)
|
||||
}
|
||||
}
|
||||
return missing
|
||||
}
|
||||
|
||||
// sortEcShards sorts EC shards based on the specified field and order
|
||||
func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) {
|
||||
sort.Slice(shards, func(i, j int) bool {
|
||||
|
||||
@@ -430,67 +430,6 @@ func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]Co
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
// convertRecordTypeToSchemaFields converts a protobuf RecordType to SchemaFieldInfo slice
|
||||
func convertRecordTypeToSchemaFields(recordType *schema_pb.RecordType) []SchemaFieldInfo {
|
||||
var schemaFields []SchemaFieldInfo
|
||||
|
||||
if recordType == nil || recordType.Fields == nil {
|
||||
return schemaFields
|
||||
}
|
||||
|
||||
for _, field := range recordType.Fields {
|
||||
schemaField := SchemaFieldInfo{
|
||||
Name: field.Name,
|
||||
Type: getFieldTypeString(field.Type),
|
||||
Required: field.IsRequired,
|
||||
}
|
||||
schemaFields = append(schemaFields, schemaField)
|
||||
}
|
||||
|
||||
return schemaFields
|
||||
}
|
||||
|
||||
// getFieldTypeString converts a protobuf Type to a human-readable string
|
||||
func getFieldTypeString(fieldType *schema_pb.Type) string {
|
||||
if fieldType == nil {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
switch kind := fieldType.Kind.(type) {
|
||||
case *schema_pb.Type_ScalarType:
|
||||
return getScalarTypeString(kind.ScalarType)
|
||||
case *schema_pb.Type_RecordType:
|
||||
return "record"
|
||||
case *schema_pb.Type_ListType:
|
||||
elementType := getFieldTypeString(kind.ListType.ElementType)
|
||||
return fmt.Sprintf("list<%s>", elementType)
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// getScalarTypeString converts a protobuf ScalarType to a string
|
||||
func getScalarTypeString(scalarType schema_pb.ScalarType) string {
|
||||
switch scalarType {
|
||||
case schema_pb.ScalarType_BOOL:
|
||||
return "bool"
|
||||
case schema_pb.ScalarType_INT32:
|
||||
return "int32"
|
||||
case schema_pb.ScalarType_INT64:
|
||||
return "int64"
|
||||
case schema_pb.ScalarType_FLOAT:
|
||||
return "float"
|
||||
case schema_pb.ScalarType_DOUBLE:
|
||||
return "double"
|
||||
case schema_pb.ScalarType_BYTES:
|
||||
return "bytes"
|
||||
case schema_pb.ScalarType_STRING:
|
||||
return "string"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// convertTopicPublishers converts protobuf TopicPublisher slice to PublisherInfo slice
|
||||
func convertTopicPublishers(publishers []*mq_pb.TopicPublisher) []PublisherInfo {
|
||||
publisherInfos := make([]PublisherInfo, 0, len(publishers))
|
||||
|
||||
@@ -2,8 +2,6 @@ package dash
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -851,43 +849,6 @@ func normalizeTimeout(timeoutSeconds int, defaultTimeout, maxTimeout time.Durati
|
||||
return timeout
|
||||
}
|
||||
|
||||
func buildJobSpecFromProposal(jobType string, proposal *plugin_pb.JobProposal, index int) *plugin_pb.JobSpec {
|
||||
now := timestamppb.Now()
|
||||
suffix := make([]byte, 4)
|
||||
if _, err := rand.Read(suffix); err != nil {
|
||||
// Fallback to simpler ID if rand fails
|
||||
suffix = []byte(fmt.Sprintf("%d", index))
|
||||
}
|
||||
jobID := fmt.Sprintf("%s-%d-%s", jobType, now.AsTime().UnixNano(), hex.EncodeToString(suffix))
|
||||
|
||||
jobSpec := &plugin_pb.JobSpec{
|
||||
JobId: jobID,
|
||||
JobType: jobType,
|
||||
Priority: plugin_pb.JobPriority_JOB_PRIORITY_NORMAL,
|
||||
CreatedAt: now,
|
||||
Labels: make(map[string]string),
|
||||
Parameters: make(map[string]*plugin_pb.ConfigValue),
|
||||
DedupeKey: "",
|
||||
}
|
||||
|
||||
if proposal != nil {
|
||||
jobSpec.Summary = proposal.Summary
|
||||
jobSpec.Detail = proposal.Detail
|
||||
if proposal.Priority != plugin_pb.JobPriority_JOB_PRIORITY_UNSPECIFIED {
|
||||
jobSpec.Priority = proposal.Priority
|
||||
}
|
||||
jobSpec.DedupeKey = proposal.DedupeKey
|
||||
jobSpec.Parameters = plugin.CloneConfigValueMap(proposal.Parameters)
|
||||
if proposal.Labels != nil {
|
||||
for k, v := range proposal.Labels {
|
||||
jobSpec.Labels[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return jobSpec
|
||||
}
|
||||
|
||||
func applyDescriptorDefaultsToPersistedConfig(
|
||||
config *plugin_pb.PersistedJobTypeConfig,
|
||||
descriptor *plugin_pb.JobTypeDescriptor,
|
||||
|
||||
@@ -115,32 +115,6 @@ func TestExpirePluginJobAPI(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildJobSpecFromProposalDoesNotReuseProposalID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
proposal := &plugin_pb.JobProposal{
|
||||
ProposalId: "vacuum-2",
|
||||
DedupeKey: "vacuum:2",
|
||||
JobType: "vacuum",
|
||||
}
|
||||
|
||||
jobA := buildJobSpecFromProposal("vacuum", proposal, 0)
|
||||
jobB := buildJobSpecFromProposal("vacuum", proposal, 1)
|
||||
|
||||
if jobA.JobId == proposal.ProposalId {
|
||||
t.Fatalf("job id must not reuse proposal id: %s", jobA.JobId)
|
||||
}
|
||||
if jobB.JobId == proposal.ProposalId {
|
||||
t.Fatalf("job id must not reuse proposal id: %s", jobB.JobId)
|
||||
}
|
||||
if jobA.JobId == jobB.JobId {
|
||||
t.Fatalf("job ids must be unique across jobs: %s", jobA.JobId)
|
||||
}
|
||||
if jobA.DedupeKey != proposal.DedupeKey {
|
||||
t.Fatalf("dedupe key must be preserved: got=%s want=%s", jobA.DedupeKey, proposal.DedupeKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyDescriptorDefaultsToPersistedConfigBackfillsAdminDefaults(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -787,15 +787,6 @@ func (s *WorkerGrpcServer) RequestTaskLogsFromAllWorkers(taskID string, maxEntri
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// convertTaskParameters converts task parameters to protobuf format by
// rendering every value to its default "%v" string form. A nil or empty
// input yields an empty (non-nil) map.
func convertTaskParameters(params map[string]interface{}) map[string]string {
	converted := make(map[string]string, len(params))
	for name, value := range params {
		converted[name] = fmt.Sprintf("%v", value)
	}
	return converted
}
|
||||
|
||||
func findClientAddress(ctx context.Context) string {
|
||||
// fmt.Printf("FromContext %+v\n", ctx)
|
||||
pr, ok := peer.FromContext(ctx)
|
||||
|
||||
Reference in New Issue
Block a user