* refactoring * add ec shard size * address comments * passing task id There seems to be a disconnect between the pending tasks created in ActiveTopology and the TaskDetectionResult returned by this function. A taskID is generated locally and used to create pending tasks via AddPendingECShardTask, but this taskID is not stored in the TaskDetectionResult or passed along in any way. This makes it impossible for the worker that eventually executes the task to know which pending task in ActiveTopology it corresponds to. Without the correct taskID, the worker cannot call AssignTask or CompleteTask on the master, breaking the entire task lifecycle and capacity management feature. A potential solution is to add a TaskID field to TaskDetectionResult and worker_pb.TaskParams, ensuring the ID is propagated from detection to execution. * 1 source multiple destinations * task supports multi source and destination * ec needs to clean up previous shards * use erasure coding constants * getPlanningCapacityUnsafe and getEffectiveAvailableCapacityUnsafe should return StorageSlotChange for calculation * use CanAccommodate to calculate * remove dead code * address comments * fix Mutex Copying in Protobuf Structs * use constants * fix estimatedSize The calculation for estimatedSize only considers source.EstimatedSize and dest.StorageChange, but omits dest.EstimatedSize. The TaskDestination struct has an EstimatedSize field, which seems to be ignored here. This could lead to an incorrect estimation of the total size of data involved in tasks on a disk. The loop should probably also include estimatedSize += dest.EstimatedSize.
* at.assignTaskToDisk(task) * refactoring * Update weed/admin/topology/internal.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * fail fast * fix compilation * Update weed/worker/tasks/erasure_coding/detection.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * indexes for volume and shard locations * dedup with ToVolumeSlots * return an additional boolean to indicate success, or an error * Update abstract_sql_store.go * fix * Update weed/worker/tasks/erasure_coding/detection.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * Update weed/admin/topology/task_management.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * faster findVolumeDisk * Update weed/worker/tasks/erasure_coding/detection.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update weed/admin/topology/storage_slot_test.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * refactor * simplify * remove unused GetDiskStorageImpact function * refactor * add comments * Update weed/admin/topology/storage_impact.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * Update weed/admin/topology/storage_slot_test.go Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> * Update storage_impact.go * AddPendingTask The unified AddPendingTask function now serves as the single entry point for all task creation, successfully consolidating the previously separate functions while maintaining full functionality and improving code organization. --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
3233 lines
105 KiB
Go
3233 lines
105 KiB
Go
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
// versions:
|
|
// protoc-gen-go v1.36.6
|
|
// protoc v5.29.3
|
|
// source: worker.proto
|
|
|
|
package worker_pb
|
|
|
|
import (
|
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
reflect "reflect"
|
|
sync "sync"
|
|
unsafe "unsafe"
|
|
)
|
|
|
|
const (
|
|
// Verify that this generated code is sufficiently up-to-date.
|
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
|
)
|
|
|
|
// WorkerMessage represents messages from worker to admin
//
// NOTE(review): this file is generated by protoc-gen-go (see the
// "DO NOT EDIT" header). To change these definitions, edit
// worker.proto and regenerate; do not modify this code by hand.
type WorkerMessage struct {
	state     protoimpl.MessageState `protogen:"open.v1"`
	WorkerId  string                 `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
	Timestamp int64                  `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	// Types that are valid to be assigned to Message:
	//
	//	*WorkerMessage_Registration
	//	*WorkerMessage_Heartbeat
	//	*WorkerMessage_TaskRequest
	//	*WorkerMessage_TaskUpdate
	//	*WorkerMessage_TaskComplete
	//	*WorkerMessage_Shutdown
	//	*WorkerMessage_TaskLogResponse
	Message       isWorkerMessage_Message `protobuf_oneof:"message"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero value and re-associates it with the
// generated message metadata.
func (x *WorkerMessage) Reset() {
	*x = WorkerMessage{}
	mi := &file_worker_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf runtime's text formatter.
func (x *WorkerMessage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerMessage) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *WorkerMessage) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerMessage.ProtoReflect.Descriptor instead.
func (*WorkerMessage) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{0}
}

// Getters below return the field's zero value on a nil receiver, so
// chained access is nil-safe (standard generated-code behavior).
func (x *WorkerMessage) GetWorkerId() string {
	if x != nil {
		return x.WorkerId
	}
	return ""
}

func (x *WorkerMessage) GetTimestamp() int64 {
	if x != nil {
		return x.Timestamp
	}
	return 0
}

func (x *WorkerMessage) GetMessage() isWorkerMessage_Message {
	if x != nil {
		return x.Message
	}
	return nil
}

func (x *WorkerMessage) GetRegistration() *WorkerRegistration {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_Registration); ok {
			return x.Registration
		}
	}
	return nil
}

func (x *WorkerMessage) GetHeartbeat() *WorkerHeartbeat {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_Heartbeat); ok {
			return x.Heartbeat
		}
	}
	return nil
}

func (x *WorkerMessage) GetTaskRequest() *TaskRequest {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_TaskRequest); ok {
			return x.TaskRequest
		}
	}
	return nil
}

func (x *WorkerMessage) GetTaskUpdate() *TaskUpdate {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_TaskUpdate); ok {
			return x.TaskUpdate
		}
	}
	return nil
}

func (x *WorkerMessage) GetTaskComplete() *TaskComplete {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_TaskComplete); ok {
			return x.TaskComplete
		}
	}
	return nil
}

func (x *WorkerMessage) GetShutdown() *WorkerShutdown {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_Shutdown); ok {
			return x.Shutdown
		}
	}
	return nil
}

func (x *WorkerMessage) GetTaskLogResponse() *TaskLogResponse {
	if x != nil {
		if x, ok := x.Message.(*WorkerMessage_TaskLogResponse); ok {
			return x.TaskLogResponse
		}
	}
	return nil
}

// isWorkerMessage_Message is the sealed interface implemented by the
// oneof wrapper types for the "message" field.
type isWorkerMessage_Message interface {
	isWorkerMessage_Message()
}

type WorkerMessage_Registration struct {
	Registration *WorkerRegistration `protobuf:"bytes,3,opt,name=registration,proto3,oneof"`
}

type WorkerMessage_Heartbeat struct {
	Heartbeat *WorkerHeartbeat `protobuf:"bytes,4,opt,name=heartbeat,proto3,oneof"`
}

type WorkerMessage_TaskRequest struct {
	TaskRequest *TaskRequest `protobuf:"bytes,5,opt,name=task_request,json=taskRequest,proto3,oneof"`
}

type WorkerMessage_TaskUpdate struct {
	TaskUpdate *TaskUpdate `protobuf:"bytes,6,opt,name=task_update,json=taskUpdate,proto3,oneof"`
}

type WorkerMessage_TaskComplete struct {
	TaskComplete *TaskComplete `protobuf:"bytes,7,opt,name=task_complete,json=taskComplete,proto3,oneof"`
}

type WorkerMessage_Shutdown struct {
	Shutdown *WorkerShutdown `protobuf:"bytes,8,opt,name=shutdown,proto3,oneof"`
}

type WorkerMessage_TaskLogResponse struct {
	TaskLogResponse *TaskLogResponse `protobuf:"bytes,9,opt,name=task_log_response,json=taskLogResponse,proto3,oneof"`
}

func (*WorkerMessage_Registration) isWorkerMessage_Message() {}

func (*WorkerMessage_Heartbeat) isWorkerMessage_Message() {}

func (*WorkerMessage_TaskRequest) isWorkerMessage_Message() {}

func (*WorkerMessage_TaskUpdate) isWorkerMessage_Message() {}

func (*WorkerMessage_TaskComplete) isWorkerMessage_Message() {}

func (*WorkerMessage_Shutdown) isWorkerMessage_Message() {}

func (*WorkerMessage_TaskLogResponse) isWorkerMessage_Message() {}
|
|
|
|
// AdminMessage represents messages from admin to worker
//
// NOTE(review): generated by protoc-gen-go — edit worker.proto and
// regenerate instead of changing this code by hand.
type AdminMessage struct {
	state     protoimpl.MessageState `protogen:"open.v1"`
	AdminId   string                 `protobuf:"bytes,1,opt,name=admin_id,json=adminId,proto3" json:"admin_id,omitempty"`
	Timestamp int64                  `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	// Types that are valid to be assigned to Message:
	//
	//	*AdminMessage_RegistrationResponse
	//	*AdminMessage_HeartbeatResponse
	//	*AdminMessage_TaskAssignment
	//	*AdminMessage_TaskCancellation
	//	*AdminMessage_AdminShutdown
	//	*AdminMessage_TaskLogRequest
	Message       isAdminMessage_Message `protobuf_oneof:"message"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero value and re-associates it with the
// generated message metadata.
func (x *AdminMessage) Reset() {
	*x = AdminMessage{}
	mi := &file_worker_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *AdminMessage) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AdminMessage) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *AdminMessage) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AdminMessage.ProtoReflect.Descriptor instead.
func (*AdminMessage) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{1}
}

// Getters below are nil-safe: they return the zero value on a nil
// receiver or when a different oneof variant is set.
func (x *AdminMessage) GetAdminId() string {
	if x != nil {
		return x.AdminId
	}
	return ""
}

func (x *AdminMessage) GetTimestamp() int64 {
	if x != nil {
		return x.Timestamp
	}
	return 0
}

func (x *AdminMessage) GetMessage() isAdminMessage_Message {
	if x != nil {
		return x.Message
	}
	return nil
}

func (x *AdminMessage) GetRegistrationResponse() *RegistrationResponse {
	if x != nil {
		if x, ok := x.Message.(*AdminMessage_RegistrationResponse); ok {
			return x.RegistrationResponse
		}
	}
	return nil
}

func (x *AdminMessage) GetHeartbeatResponse() *HeartbeatResponse {
	if x != nil {
		if x, ok := x.Message.(*AdminMessage_HeartbeatResponse); ok {
			return x.HeartbeatResponse
		}
	}
	return nil
}

func (x *AdminMessage) GetTaskAssignment() *TaskAssignment {
	if x != nil {
		if x, ok := x.Message.(*AdminMessage_TaskAssignment); ok {
			return x.TaskAssignment
		}
	}
	return nil
}

func (x *AdminMessage) GetTaskCancellation() *TaskCancellation {
	if x != nil {
		if x, ok := x.Message.(*AdminMessage_TaskCancellation); ok {
			return x.TaskCancellation
		}
	}
	return nil
}

func (x *AdminMessage) GetAdminShutdown() *AdminShutdown {
	if x != nil {
		if x, ok := x.Message.(*AdminMessage_AdminShutdown); ok {
			return x.AdminShutdown
		}
	}
	return nil
}

func (x *AdminMessage) GetTaskLogRequest() *TaskLogRequest {
	if x != nil {
		if x, ok := x.Message.(*AdminMessage_TaskLogRequest); ok {
			return x.TaskLogRequest
		}
	}
	return nil
}

// isAdminMessage_Message is the sealed interface implemented by the
// oneof wrapper types for the "message" field.
type isAdminMessage_Message interface {
	isAdminMessage_Message()
}

type AdminMessage_RegistrationResponse struct {
	RegistrationResponse *RegistrationResponse `protobuf:"bytes,3,opt,name=registration_response,json=registrationResponse,proto3,oneof"`
}

type AdminMessage_HeartbeatResponse struct {
	HeartbeatResponse *HeartbeatResponse `protobuf:"bytes,4,opt,name=heartbeat_response,json=heartbeatResponse,proto3,oneof"`
}

type AdminMessage_TaskAssignment struct {
	TaskAssignment *TaskAssignment `protobuf:"bytes,5,opt,name=task_assignment,json=taskAssignment,proto3,oneof"`
}

type AdminMessage_TaskCancellation struct {
	TaskCancellation *TaskCancellation `protobuf:"bytes,6,opt,name=task_cancellation,json=taskCancellation,proto3,oneof"`
}

type AdminMessage_AdminShutdown struct {
	AdminShutdown *AdminShutdown `protobuf:"bytes,7,opt,name=admin_shutdown,json=adminShutdown,proto3,oneof"`
}

type AdminMessage_TaskLogRequest struct {
	TaskLogRequest *TaskLogRequest `protobuf:"bytes,8,opt,name=task_log_request,json=taskLogRequest,proto3,oneof"`
}

func (*AdminMessage_RegistrationResponse) isAdminMessage_Message() {}

func (*AdminMessage_HeartbeatResponse) isAdminMessage_Message() {}

func (*AdminMessage_TaskAssignment) isAdminMessage_Message() {}

func (*AdminMessage_TaskCancellation) isAdminMessage_Message() {}

func (*AdminMessage_AdminShutdown) isAdminMessage_Message() {}

func (*AdminMessage_TaskLogRequest) isAdminMessage_Message() {}
|
|
|
|
// WorkerRegistration message when worker connects
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand.
type WorkerRegistration struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	WorkerId      string                 `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
	Address       string                 `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
	Capabilities  []string               `protobuf:"bytes,3,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
	MaxConcurrent int32                  `protobuf:"varint,4,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"`
	Metadata      map[string]string      `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WorkerRegistration) Reset() {
	*x = WorkerRegistration{}
	mi := &file_worker_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WorkerRegistration) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerRegistration) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *WorkerRegistration) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerRegistration.ProtoReflect.Descriptor instead.
func (*WorkerRegistration) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{2}
}

// Nil-safe getters: each returns the field's zero value on a nil receiver.
func (x *WorkerRegistration) GetWorkerId() string {
	if x != nil {
		return x.WorkerId
	}
	return ""
}

func (x *WorkerRegistration) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}

func (x *WorkerRegistration) GetCapabilities() []string {
	if x != nil {
		return x.Capabilities
	}
	return nil
}

func (x *WorkerRegistration) GetMaxConcurrent() int32 {
	if x != nil {
		return x.MaxConcurrent
	}
	return 0
}

func (x *WorkerRegistration) GetMetadata() map[string]string {
	if x != nil {
		return x.Metadata
	}
	return nil
}
|
|
|
|
// RegistrationResponse confirms worker registration
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand.
type RegistrationResponse struct {
	state            protoimpl.MessageState `protogen:"open.v1"`
	Success          bool                   `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
	Message          string                 `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
	AssignedWorkerId string                 `protobuf:"bytes,3,opt,name=assigned_worker_id,json=assignedWorkerId,proto3" json:"assigned_worker_id,omitempty"`
	unknownFields    protoimpl.UnknownFields
	sizeCache        protoimpl.SizeCache
}

func (x *RegistrationResponse) Reset() {
	*x = RegistrationResponse{}
	mi := &file_worker_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *RegistrationResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegistrationResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *RegistrationResponse) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegistrationResponse.ProtoReflect.Descriptor instead.
func (*RegistrationResponse) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{3}
}

// Nil-safe getters: each returns the field's zero value on a nil receiver.
func (x *RegistrationResponse) GetSuccess() bool {
	if x != nil {
		return x.Success
	}
	return false
}

func (x *RegistrationResponse) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

func (x *RegistrationResponse) GetAssignedWorkerId() string {
	if x != nil {
		return x.AssignedWorkerId
	}
	return ""
}
|
|
|
|
// WorkerHeartbeat sent periodically by worker
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand.
type WorkerHeartbeat struct {
	state          protoimpl.MessageState `protogen:"open.v1"`
	WorkerId       string                 `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
	Status         string                 `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	CurrentLoad    int32                  `protobuf:"varint,3,opt,name=current_load,json=currentLoad,proto3" json:"current_load,omitempty"`
	MaxConcurrent  int32                  `protobuf:"varint,4,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"`
	CurrentTaskIds []string               `protobuf:"bytes,5,rep,name=current_task_ids,json=currentTaskIds,proto3" json:"current_task_ids,omitempty"`
	TasksCompleted int32                  `protobuf:"varint,6,opt,name=tasks_completed,json=tasksCompleted,proto3" json:"tasks_completed,omitempty"`
	TasksFailed    int32                  `protobuf:"varint,7,opt,name=tasks_failed,json=tasksFailed,proto3" json:"tasks_failed,omitempty"`
	UptimeSeconds  int64                  `protobuf:"varint,8,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"`
	unknownFields  protoimpl.UnknownFields
	sizeCache      protoimpl.SizeCache
}

func (x *WorkerHeartbeat) Reset() {
	*x = WorkerHeartbeat{}
	mi := &file_worker_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WorkerHeartbeat) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerHeartbeat) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *WorkerHeartbeat) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerHeartbeat.ProtoReflect.Descriptor instead.
func (*WorkerHeartbeat) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{4}
}

// Nil-safe getters: each returns the field's zero value on a nil receiver.
func (x *WorkerHeartbeat) GetWorkerId() string {
	if x != nil {
		return x.WorkerId
	}
	return ""
}

func (x *WorkerHeartbeat) GetStatus() string {
	if x != nil {
		return x.Status
	}
	return ""
}

func (x *WorkerHeartbeat) GetCurrentLoad() int32 {
	if x != nil {
		return x.CurrentLoad
	}
	return 0
}

func (x *WorkerHeartbeat) GetMaxConcurrent() int32 {
	if x != nil {
		return x.MaxConcurrent
	}
	return 0
}

func (x *WorkerHeartbeat) GetCurrentTaskIds() []string {
	if x != nil {
		return x.CurrentTaskIds
	}
	return nil
}

func (x *WorkerHeartbeat) GetTasksCompleted() int32 {
	if x != nil {
		return x.TasksCompleted
	}
	return 0
}

func (x *WorkerHeartbeat) GetTasksFailed() int32 {
	if x != nil {
		return x.TasksFailed
	}
	return 0
}

func (x *WorkerHeartbeat) GetUptimeSeconds() int64 {
	if x != nil {
		return x.UptimeSeconds
	}
	return 0
}
|
|
|
|
// HeartbeatResponse acknowledges heartbeat
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand.
type HeartbeatResponse struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Success       bool                   `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
	Message       string                 `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *HeartbeatResponse) Reset() {
	*x = HeartbeatResponse{}
	mi := &file_worker_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *HeartbeatResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HeartbeatResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead.
func (*HeartbeatResponse) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{5}
}

// Nil-safe getters: each returns the field's zero value on a nil receiver.
func (x *HeartbeatResponse) GetSuccess() bool {
	if x != nil {
		return x.Success
	}
	return false
}

func (x *HeartbeatResponse) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}
|
|
|
|
// TaskRequest from worker asking for new tasks
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand.
type TaskRequest struct {
	state          protoimpl.MessageState `protogen:"open.v1"`
	WorkerId       string                 `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
	Capabilities   []string               `protobuf:"bytes,2,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
	AvailableSlots int32                  `protobuf:"varint,3,opt,name=available_slots,json=availableSlots,proto3" json:"available_slots,omitempty"`
	unknownFields  protoimpl.UnknownFields
	sizeCache      protoimpl.SizeCache
}

func (x *TaskRequest) Reset() {
	*x = TaskRequest{}
	mi := &file_worker_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TaskRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TaskRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskRequest) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskRequest.ProtoReflect.Descriptor instead.
func (*TaskRequest) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{6}
}

// Nil-safe getters: each returns the field's zero value on a nil receiver.
func (x *TaskRequest) GetWorkerId() string {
	if x != nil {
		return x.WorkerId
	}
	return ""
}

func (x *TaskRequest) GetCapabilities() []string {
	if x != nil {
		return x.Capabilities
	}
	return nil
}

func (x *TaskRequest) GetAvailableSlots() int32 {
	if x != nil {
		return x.AvailableSlots
	}
	return 0
}
|
|
|
|
// TaskAssignment from admin to worker
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand.
type TaskAssignment struct {
	state       protoimpl.MessageState `protogen:"open.v1"`
	TaskId      string                 `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
	TaskType    string                 `protobuf:"bytes,2,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
	Params      *TaskParams            `protobuf:"bytes,3,opt,name=params,proto3" json:"params,omitempty"`
	Priority    int32                  `protobuf:"varint,4,opt,name=priority,proto3" json:"priority,omitempty"`
	CreatedTime int64                  `protobuf:"varint,5,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"`
	Metadata    map[string]string      `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *TaskAssignment) Reset() {
	*x = TaskAssignment{}
	mi := &file_worker_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TaskAssignment) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TaskAssignment) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskAssignment) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskAssignment.ProtoReflect.Descriptor instead.
func (*TaskAssignment) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{7}
}

// Nil-safe getters: each returns the field's zero value on a nil receiver.
func (x *TaskAssignment) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *TaskAssignment) GetTaskType() string {
	if x != nil {
		return x.TaskType
	}
	return ""
}

func (x *TaskAssignment) GetParams() *TaskParams {
	if x != nil {
		return x.Params
	}
	return nil
}

func (x *TaskAssignment) GetPriority() int32 {
	if x != nil {
		return x.Priority
	}
	return 0
}

func (x *TaskAssignment) GetCreatedTime() int64 {
	if x != nil {
		return x.CreatedTime
	}
	return 0
}

func (x *TaskAssignment) GetMetadata() map[string]string {
	if x != nil {
		return x.Metadata
	}
	return nil
}
|
|
|
|
// TaskParams contains task-specific parameters with typed variants
//
// NOTE(review): generated code — regenerate from worker.proto rather
// than editing by hand. The TaskId field (tag 12) carries the
// ActiveTopology task ID so workers can report lifecycle events
// (assign/complete) against the correct pending task.
type TaskParams struct {
	state      protoimpl.MessageState `protogen:"open.v1"`
	TaskId     string                 `protobuf:"bytes,12,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` // ActiveTopology task ID for lifecycle management
	VolumeId   uint32                 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
	Server     string                 `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"`
	Collection string                 `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
	DataCenter string                 `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"`
	Rack       string                 `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"`
	Replicas   []string               `protobuf:"bytes,6,rep,name=replicas,proto3" json:"replicas,omitempty"`
	VolumeSize uint64                 `protobuf:"varint,11,opt,name=volume_size,json=volumeSize,proto3" json:"volume_size,omitempty"` // Original volume size in bytes for tracking size changes
	// Typed task parameters
	//
	// Types that are valid to be assigned to TaskParams:
	//
	//	*TaskParams_VacuumParams
	//	*TaskParams_ErasureCodingParams
	//	*TaskParams_BalanceParams
	//	*TaskParams_ReplicationParams
	TaskParams    isTaskParams_TaskParams `protobuf_oneof:"task_params"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *TaskParams) Reset() {
	*x = TaskParams{}
	mi := &file_worker_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TaskParams) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TaskParams) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskParams) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskParams.ProtoReflect.Descriptor instead.
func (*TaskParams) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{8}
}

// Nil-safe getters: each returns the field's zero value on a nil
// receiver, or nil when a different oneof variant is set.
func (x *TaskParams) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *TaskParams) GetVolumeId() uint32 {
	if x != nil {
		return x.VolumeId
	}
	return 0
}

func (x *TaskParams) GetServer() string {
	if x != nil {
		return x.Server
	}
	return ""
}

func (x *TaskParams) GetCollection() string {
	if x != nil {
		return x.Collection
	}
	return ""
}

func (x *TaskParams) GetDataCenter() string {
	if x != nil {
		return x.DataCenter
	}
	return ""
}

func (x *TaskParams) GetRack() string {
	if x != nil {
		return x.Rack
	}
	return ""
}

func (x *TaskParams) GetReplicas() []string {
	if x != nil {
		return x.Replicas
	}
	return nil
}

func (x *TaskParams) GetVolumeSize() uint64 {
	if x != nil {
		return x.VolumeSize
	}
	return 0
}

func (x *TaskParams) GetTaskParams() isTaskParams_TaskParams {
	if x != nil {
		return x.TaskParams
	}
	return nil
}

func (x *TaskParams) GetVacuumParams() *VacuumTaskParams {
	if x != nil {
		if x, ok := x.TaskParams.(*TaskParams_VacuumParams); ok {
			return x.VacuumParams
		}
	}
	return nil
}

func (x *TaskParams) GetErasureCodingParams() *ErasureCodingTaskParams {
	if x != nil {
		if x, ok := x.TaskParams.(*TaskParams_ErasureCodingParams); ok {
			return x.ErasureCodingParams
		}
	}
	return nil
}

func (x *TaskParams) GetBalanceParams() *BalanceTaskParams {
	if x != nil {
		if x, ok := x.TaskParams.(*TaskParams_BalanceParams); ok {
			return x.BalanceParams
		}
	}
	return nil
}

func (x *TaskParams) GetReplicationParams() *ReplicationTaskParams {
	if x != nil {
		if x, ok := x.TaskParams.(*TaskParams_ReplicationParams); ok {
			return x.ReplicationParams
		}
	}
	return nil
}

// isTaskParams_TaskParams is the sealed interface implemented by the
// oneof wrapper types for the "task_params" field.
type isTaskParams_TaskParams interface {
	isTaskParams_TaskParams()
}

type TaskParams_VacuumParams struct {
	VacuumParams *VacuumTaskParams `protobuf:"bytes,7,opt,name=vacuum_params,json=vacuumParams,proto3,oneof"`
}

type TaskParams_ErasureCodingParams struct {
	ErasureCodingParams *ErasureCodingTaskParams `protobuf:"bytes,8,opt,name=erasure_coding_params,json=erasureCodingParams,proto3,oneof"`
}

type TaskParams_BalanceParams struct {
	BalanceParams *BalanceTaskParams `protobuf:"bytes,9,opt,name=balance_params,json=balanceParams,proto3,oneof"`
}

type TaskParams_ReplicationParams struct {
	ReplicationParams *ReplicationTaskParams `protobuf:"bytes,10,opt,name=replication_params,json=replicationParams,proto3,oneof"`
}

func (*TaskParams_VacuumParams) isTaskParams_TaskParams() {}

func (*TaskParams_ErasureCodingParams) isTaskParams_TaskParams() {}

func (*TaskParams_BalanceParams) isTaskParams_TaskParams() {}

func (*TaskParams_ReplicationParams) isTaskParams_TaskParams() {}
|
|
|
|
// VacuumTaskParams for vacuum operations
|
|
type VacuumTaskParams struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
GarbageThreshold float64 `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum
|
|
ForceVacuum bool `protobuf:"varint,2,opt,name=force_vacuum,json=forceVacuum,proto3" json:"force_vacuum,omitempty"` // Force vacuum even if below threshold
|
|
BatchSize int32 `protobuf:"varint,3,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` // Number of files to process per batch
|
|
WorkingDir string `protobuf:"bytes,4,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for temporary files
|
|
VerifyChecksum bool `protobuf:"varint,5,opt,name=verify_checksum,json=verifyChecksum,proto3" json:"verify_checksum,omitempty"` // Verify file checksums during vacuum
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *VacuumTaskParams) Reset() {
|
|
*x = VacuumTaskParams{}
|
|
mi := &file_worker_proto_msgTypes[9]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *VacuumTaskParams) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*VacuumTaskParams) ProtoMessage() {}
|
|
|
|
func (x *VacuumTaskParams) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[9]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use VacuumTaskParams.ProtoReflect.Descriptor instead.
|
|
func (*VacuumTaskParams) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{9}
|
|
}
|
|
|
|
func (x *VacuumTaskParams) GetGarbageThreshold() float64 {
|
|
if x != nil {
|
|
return x.GarbageThreshold
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *VacuumTaskParams) GetForceVacuum() bool {
|
|
if x != nil {
|
|
return x.ForceVacuum
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (x *VacuumTaskParams) GetBatchSize() int32 {
|
|
if x != nil {
|
|
return x.BatchSize
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *VacuumTaskParams) GetWorkingDir() string {
|
|
if x != nil {
|
|
return x.WorkingDir
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *VacuumTaskParams) GetVerifyChecksum() bool {
|
|
if x != nil {
|
|
return x.VerifyChecksum
|
|
}
|
|
return false
|
|
}
|
|
|
|
// ErasureCodingTaskParams for EC encoding operations
|
|
type ErasureCodingTaskParams struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
EstimatedShardSize uint64 `protobuf:"varint,3,opt,name=estimated_shard_size,json=estimatedShardSize,proto3" json:"estimated_shard_size,omitempty"` // Estimated size per shard
|
|
DataShards int32 `protobuf:"varint,4,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"` // Number of data shards (default: 10)
|
|
ParityShards int32 `protobuf:"varint,5,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (default: 4)
|
|
WorkingDir string `protobuf:"bytes,6,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for EC processing
|
|
MasterClient string `protobuf:"bytes,7,opt,name=master_client,json=masterClient,proto3" json:"master_client,omitempty"` // Master server address
|
|
CleanupSource bool `protobuf:"varint,8,opt,name=cleanup_source,json=cleanupSource,proto3" json:"cleanup_source,omitempty"` // Whether to cleanup source volume after EC
|
|
PlacementConflicts []string `protobuf:"bytes,9,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
|
|
Destinations []*ECDestination `protobuf:"bytes,10,rep,name=destinations,proto3" json:"destinations,omitempty"` // Planned destinations with disk information
|
|
ExistingShardLocations []*ExistingECShardLocation `protobuf:"bytes,11,rep,name=existing_shard_locations,json=existingShardLocations,proto3" json:"existing_shard_locations,omitempty"` // Existing EC shards to cleanup
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) Reset() {
|
|
*x = ErasureCodingTaskParams{}
|
|
mi := &file_worker_proto_msgTypes[10]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*ErasureCodingTaskParams) ProtoMessage() {}
|
|
|
|
func (x *ErasureCodingTaskParams) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[10]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use ErasureCodingTaskParams.ProtoReflect.Descriptor instead.
|
|
func (*ErasureCodingTaskParams) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{10}
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetEstimatedShardSize() uint64 {
|
|
if x != nil {
|
|
return x.EstimatedShardSize
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetDataShards() int32 {
|
|
if x != nil {
|
|
return x.DataShards
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetParityShards() int32 {
|
|
if x != nil {
|
|
return x.ParityShards
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetWorkingDir() string {
|
|
if x != nil {
|
|
return x.WorkingDir
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetMasterClient() string {
|
|
if x != nil {
|
|
return x.MasterClient
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetCleanupSource() bool {
|
|
if x != nil {
|
|
return x.CleanupSource
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetPlacementConflicts() []string {
|
|
if x != nil {
|
|
return x.PlacementConflicts
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetDestinations() []*ECDestination {
|
|
if x != nil {
|
|
return x.Destinations
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (x *ErasureCodingTaskParams) GetExistingShardLocations() []*ExistingECShardLocation {
|
|
if x != nil {
|
|
return x.ExistingShardLocations
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// ECDestination represents a planned destination for EC shards with disk information
|
|
type ECDestination struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Target server address
|
|
DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID
|
|
Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Target rack for placement tracking
|
|
DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Target data center for placement tracking
|
|
PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the placement
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *ECDestination) Reset() {
|
|
*x = ECDestination{}
|
|
mi := &file_worker_proto_msgTypes[11]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *ECDestination) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*ECDestination) ProtoMessage() {}
|
|
|
|
func (x *ECDestination) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[11]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use ECDestination.ProtoReflect.Descriptor instead.
|
|
func (*ECDestination) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{11}
|
|
}
|
|
|
|
func (x *ECDestination) GetNode() string {
|
|
if x != nil {
|
|
return x.Node
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ECDestination) GetDiskId() uint32 {
|
|
if x != nil {
|
|
return x.DiskId
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ECDestination) GetRack() string {
|
|
if x != nil {
|
|
return x.Rack
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ECDestination) GetDataCenter() string {
|
|
if x != nil {
|
|
return x.DataCenter
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ECDestination) GetPlacementScore() float64 {
|
|
if x != nil {
|
|
return x.PlacementScore
|
|
}
|
|
return 0
|
|
}
|
|
|
|
// ExistingECShardLocation represents existing EC shards that need cleanup
|
|
type ExistingECShardLocation struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Server address with existing shards
|
|
ShardIds []uint32 `protobuf:"varint,2,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // List of shard IDs on this server
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *ExistingECShardLocation) Reset() {
|
|
*x = ExistingECShardLocation{}
|
|
mi := &file_worker_proto_msgTypes[12]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *ExistingECShardLocation) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*ExistingECShardLocation) ProtoMessage() {}
|
|
|
|
func (x *ExistingECShardLocation) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[12]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use ExistingECShardLocation.ProtoReflect.Descriptor instead.
|
|
func (*ExistingECShardLocation) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{12}
|
|
}
|
|
|
|
func (x *ExistingECShardLocation) GetNode() string {
|
|
if x != nil {
|
|
return x.Node
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ExistingECShardLocation) GetShardIds() []uint32 {
|
|
if x != nil {
|
|
return x.ShardIds
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// BalanceTaskParams for volume balancing operations
|
|
type BalanceTaskParams struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
DestNode string `protobuf:"bytes,1,opt,name=dest_node,json=destNode,proto3" json:"dest_node,omitempty"` // Planned destination node
|
|
EstimatedSize uint64 `protobuf:"varint,2,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated volume size
|
|
DestRack string `protobuf:"bytes,3,opt,name=dest_rack,json=destRack,proto3" json:"dest_rack,omitempty"` // Destination rack for placement rules
|
|
DestDc string `protobuf:"bytes,4,opt,name=dest_dc,json=destDc,proto3" json:"dest_dc,omitempty"` // Destination data center
|
|
PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the planned placement
|
|
PlacementConflicts []string `protobuf:"bytes,6,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
|
|
ForceMove bool `protobuf:"varint,7,opt,name=force_move,json=forceMove,proto3" json:"force_move,omitempty"` // Force move even with conflicts
|
|
TimeoutSeconds int32 `protobuf:"varint,8,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` // Operation timeout
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *BalanceTaskParams) Reset() {
|
|
*x = BalanceTaskParams{}
|
|
mi := &file_worker_proto_msgTypes[13]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *BalanceTaskParams) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*BalanceTaskParams) ProtoMessage() {}
|
|
|
|
func (x *BalanceTaskParams) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[13]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use BalanceTaskParams.ProtoReflect.Descriptor instead.
|
|
func (*BalanceTaskParams) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{13}
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetDestNode() string {
|
|
if x != nil {
|
|
return x.DestNode
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetEstimatedSize() uint64 {
|
|
if x != nil {
|
|
return x.EstimatedSize
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetDestRack() string {
|
|
if x != nil {
|
|
return x.DestRack
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetDestDc() string {
|
|
if x != nil {
|
|
return x.DestDc
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetPlacementScore() float64 {
|
|
if x != nil {
|
|
return x.PlacementScore
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetPlacementConflicts() []string {
|
|
if x != nil {
|
|
return x.PlacementConflicts
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetForceMove() bool {
|
|
if x != nil {
|
|
return x.ForceMove
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (x *BalanceTaskParams) GetTimeoutSeconds() int32 {
|
|
if x != nil {
|
|
return x.TimeoutSeconds
|
|
}
|
|
return 0
|
|
}
|
|
|
|
// ReplicationTaskParams for adding replicas
|
|
type ReplicationTaskParams struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
DestNode string `protobuf:"bytes,1,opt,name=dest_node,json=destNode,proto3" json:"dest_node,omitempty"` // Planned destination node for new replica
|
|
EstimatedSize uint64 `protobuf:"varint,2,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated replica size
|
|
DestRack string `protobuf:"bytes,3,opt,name=dest_rack,json=destRack,proto3" json:"dest_rack,omitempty"` // Destination rack for placement rules
|
|
DestDc string `protobuf:"bytes,4,opt,name=dest_dc,json=destDc,proto3" json:"dest_dc,omitempty"` // Destination data center
|
|
PlacementScore float64 `protobuf:"fixed64,5,opt,name=placement_score,json=placementScore,proto3" json:"placement_score,omitempty"` // Quality score of the planned placement
|
|
PlacementConflicts []string `protobuf:"bytes,6,rep,name=placement_conflicts,json=placementConflicts,proto3" json:"placement_conflicts,omitempty"` // Any placement rule conflicts
|
|
ReplicaCount int32 `protobuf:"varint,7,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // Target replica count
|
|
VerifyConsistency bool `protobuf:"varint,8,opt,name=verify_consistency,json=verifyConsistency,proto3" json:"verify_consistency,omitempty"` // Verify replica consistency after creation
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) Reset() {
|
|
*x = ReplicationTaskParams{}
|
|
mi := &file_worker_proto_msgTypes[14]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*ReplicationTaskParams) ProtoMessage() {}
|
|
|
|
func (x *ReplicationTaskParams) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[14]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use ReplicationTaskParams.ProtoReflect.Descriptor instead.
|
|
func (*ReplicationTaskParams) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{14}
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetDestNode() string {
|
|
if x != nil {
|
|
return x.DestNode
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetEstimatedSize() uint64 {
|
|
if x != nil {
|
|
return x.EstimatedSize
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetDestRack() string {
|
|
if x != nil {
|
|
return x.DestRack
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetDestDc() string {
|
|
if x != nil {
|
|
return x.DestDc
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetPlacementScore() float64 {
|
|
if x != nil {
|
|
return x.PlacementScore
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetPlacementConflicts() []string {
|
|
if x != nil {
|
|
return x.PlacementConflicts
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetReplicaCount() int32 {
|
|
if x != nil {
|
|
return x.ReplicaCount
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *ReplicationTaskParams) GetVerifyConsistency() bool {
|
|
if x != nil {
|
|
return x.VerifyConsistency
|
|
}
|
|
return false
|
|
}
|
|
|
|
// TaskUpdate reports task progress
|
|
type TaskUpdate struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
|
WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
|
|
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
|
|
Progress float32 `protobuf:"fixed32,4,opt,name=progress,proto3" json:"progress,omitempty"`
|
|
Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"`
|
|
Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *TaskUpdate) Reset() {
|
|
*x = TaskUpdate{}
|
|
mi := &file_worker_proto_msgTypes[15]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *TaskUpdate) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*TaskUpdate) ProtoMessage() {}
|
|
|
|
func (x *TaskUpdate) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[15]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use TaskUpdate.ProtoReflect.Descriptor instead.
|
|
func (*TaskUpdate) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{15}
|
|
}
|
|
|
|
func (x *TaskUpdate) GetTaskId() string {
|
|
if x != nil {
|
|
return x.TaskId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskUpdate) GetWorkerId() string {
|
|
if x != nil {
|
|
return x.WorkerId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskUpdate) GetStatus() string {
|
|
if x != nil {
|
|
return x.Status
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskUpdate) GetProgress() float32 {
|
|
if x != nil {
|
|
return x.Progress
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *TaskUpdate) GetMessage() string {
|
|
if x != nil {
|
|
return x.Message
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskUpdate) GetMetadata() map[string]string {
|
|
if x != nil {
|
|
return x.Metadata
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// TaskComplete reports task completion
|
|
type TaskComplete struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
|
WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
|
|
Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"`
|
|
ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
|
|
CompletionTime int64 `protobuf:"varint,5,opt,name=completion_time,json=completionTime,proto3" json:"completion_time,omitempty"`
|
|
ResultMetadata map[string]string `protobuf:"bytes,6,rep,name=result_metadata,json=resultMetadata,proto3" json:"result_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *TaskComplete) Reset() {
|
|
*x = TaskComplete{}
|
|
mi := &file_worker_proto_msgTypes[16]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *TaskComplete) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*TaskComplete) ProtoMessage() {}
|
|
|
|
func (x *TaskComplete) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[16]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use TaskComplete.ProtoReflect.Descriptor instead.
|
|
func (*TaskComplete) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{16}
|
|
}
|
|
|
|
func (x *TaskComplete) GetTaskId() string {
|
|
if x != nil {
|
|
return x.TaskId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskComplete) GetWorkerId() string {
|
|
if x != nil {
|
|
return x.WorkerId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskComplete) GetSuccess() bool {
|
|
if x != nil {
|
|
return x.Success
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (x *TaskComplete) GetErrorMessage() string {
|
|
if x != nil {
|
|
return x.ErrorMessage
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskComplete) GetCompletionTime() int64 {
|
|
if x != nil {
|
|
return x.CompletionTime
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *TaskComplete) GetResultMetadata() map[string]string {
|
|
if x != nil {
|
|
return x.ResultMetadata
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// TaskCancellation from admin to cancel a task
|
|
type TaskCancellation struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
|
Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"`
|
|
Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"`
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *TaskCancellation) Reset() {
|
|
*x = TaskCancellation{}
|
|
mi := &file_worker_proto_msgTypes[17]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *TaskCancellation) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*TaskCancellation) ProtoMessage() {}
|
|
|
|
func (x *TaskCancellation) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[17]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use TaskCancellation.ProtoReflect.Descriptor instead.
|
|
func (*TaskCancellation) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{17}
|
|
}
|
|
|
|
func (x *TaskCancellation) GetTaskId() string {
|
|
if x != nil {
|
|
return x.TaskId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskCancellation) GetReason() string {
|
|
if x != nil {
|
|
return x.Reason
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskCancellation) GetForce() bool {
|
|
if x != nil {
|
|
return x.Force
|
|
}
|
|
return false
|
|
}
|
|
|
|
// WorkerShutdown notifies admin that worker is shutting down
|
|
type WorkerShutdown struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
|
|
Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"`
|
|
PendingTaskIds []string `protobuf:"bytes,3,rep,name=pending_task_ids,json=pendingTaskIds,proto3" json:"pending_task_ids,omitempty"`
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *WorkerShutdown) Reset() {
|
|
*x = WorkerShutdown{}
|
|
mi := &file_worker_proto_msgTypes[18]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *WorkerShutdown) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*WorkerShutdown) ProtoMessage() {}
|
|
|
|
func (x *WorkerShutdown) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[18]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use WorkerShutdown.ProtoReflect.Descriptor instead.
|
|
func (*WorkerShutdown) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{18}
|
|
}
|
|
|
|
func (x *WorkerShutdown) GetWorkerId() string {
|
|
if x != nil {
|
|
return x.WorkerId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *WorkerShutdown) GetReason() string {
|
|
if x != nil {
|
|
return x.Reason
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *WorkerShutdown) GetPendingTaskIds() []string {
|
|
if x != nil {
|
|
return x.PendingTaskIds
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// AdminShutdown notifies worker that admin is shutting down
|
|
type AdminShutdown struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
|
|
GracefulShutdownSeconds int32 `protobuf:"varint,2,opt,name=graceful_shutdown_seconds,json=gracefulShutdownSeconds,proto3" json:"graceful_shutdown_seconds,omitempty"`
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *AdminShutdown) Reset() {
|
|
*x = AdminShutdown{}
|
|
mi := &file_worker_proto_msgTypes[19]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *AdminShutdown) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*AdminShutdown) ProtoMessage() {}
|
|
|
|
func (x *AdminShutdown) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[19]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use AdminShutdown.ProtoReflect.Descriptor instead.
|
|
func (*AdminShutdown) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{19}
|
|
}
|
|
|
|
func (x *AdminShutdown) GetReason() string {
|
|
if x != nil {
|
|
return x.Reason
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *AdminShutdown) GetGracefulShutdownSeconds() int32 {
|
|
if x != nil {
|
|
return x.GracefulShutdownSeconds
|
|
}
|
|
return 0
|
|
}
|
|
|
|
// TaskLogRequest requests logs for a specific task
|
|
type TaskLogRequest struct {
|
|
state protoimpl.MessageState `protogen:"open.v1"`
|
|
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
|
|
WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
|
|
IncludeMetadata bool `protobuf:"varint,3,opt,name=include_metadata,json=includeMetadata,proto3" json:"include_metadata,omitempty"` // Include task metadata
|
|
MaxEntries int32 `protobuf:"varint,4,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` // Maximum number of log entries (0 = all)
|
|
LogLevel string `protobuf:"bytes,5,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"` // Filter by log level (INFO, WARNING, ERROR, DEBUG)
|
|
StartTime int64 `protobuf:"varint,6,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Unix timestamp for start time filter
|
|
EndTime int64 `protobuf:"varint,7,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // Unix timestamp for end time filter
|
|
unknownFields protoimpl.UnknownFields
|
|
sizeCache protoimpl.SizeCache
|
|
}
|
|
|
|
func (x *TaskLogRequest) Reset() {
|
|
*x = TaskLogRequest{}
|
|
mi := &file_worker_proto_msgTypes[20]
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
|
|
func (x *TaskLogRequest) String() string {
|
|
return protoimpl.X.MessageStringOf(x)
|
|
}
|
|
|
|
func (*TaskLogRequest) ProtoMessage() {}
|
|
|
|
func (x *TaskLogRequest) ProtoReflect() protoreflect.Message {
|
|
mi := &file_worker_proto_msgTypes[20]
|
|
if x != nil {
|
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
if ms.LoadMessageInfo() == nil {
|
|
ms.StoreMessageInfo(mi)
|
|
}
|
|
return ms
|
|
}
|
|
return mi.MessageOf(x)
|
|
}
|
|
|
|
// Deprecated: Use TaskLogRequest.ProtoReflect.Descriptor instead.
|
|
func (*TaskLogRequest) Descriptor() ([]byte, []int) {
|
|
return file_worker_proto_rawDescGZIP(), []int{20}
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetTaskId() string {
|
|
if x != nil {
|
|
return x.TaskId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetWorkerId() string {
|
|
if x != nil {
|
|
return x.WorkerId
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetIncludeMetadata() bool {
|
|
if x != nil {
|
|
return x.IncludeMetadata
|
|
}
|
|
return false
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetMaxEntries() int32 {
|
|
if x != nil {
|
|
return x.MaxEntries
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetLogLevel() string {
|
|
if x != nil {
|
|
return x.LogLevel
|
|
}
|
|
return ""
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetStartTime() int64 {
|
|
if x != nil {
|
|
return x.StartTime
|
|
}
|
|
return 0
|
|
}
|
|
|
|
func (x *TaskLogRequest) GetEndTime() int64 {
|
|
if x != nil {
|
|
return x.EndTime
|
|
}
|
|
return 0
|
|
}
|
|
|
|
// TaskLogResponse returns task logs and metadata for a single task,
// as reported by a worker. Success/ErrorMessage describe the log
// retrieval itself, not the task's outcome.
type TaskLogResponse struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	TaskId        string                 `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
	WorkerId      string                 `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
	Success       bool                   `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"`
	ErrorMessage  string                 `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
	Metadata      *TaskLogMetadata       `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"`
	LogEntries    []*TaskLogEntry        `protobuf:"bytes,6,rep,name=log_entries,json=logEntries,proto3" json:"log_entries,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *TaskLogResponse) Reset() {
	*x = TaskLogResponse{}
	mi := &file_worker_proto_msgTypes[21]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TaskLogResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TaskLogResponse as a protobuf message.
func (*TaskLogResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskLogResponse) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[21]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskLogResponse.ProtoReflect.Descriptor instead.
func (*TaskLogResponse) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{21}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *TaskLogResponse) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *TaskLogResponse) GetWorkerId() string {
	if x != nil {
		return x.WorkerId
	}
	return ""
}

func (x *TaskLogResponse) GetSuccess() bool {
	if x != nil {
		return x.Success
	}
	return false
}

func (x *TaskLogResponse) GetErrorMessage() string {
	if x != nil {
		return x.ErrorMessage
	}
	return ""
}

func (x *TaskLogResponse) GetMetadata() *TaskLogMetadata {
	if x != nil {
		return x.Metadata
	}
	return nil
}

func (x *TaskLogResponse) GetLogEntries() []*TaskLogEntry {
	if x != nil {
		return x.LogEntries
	}
	return nil
}
|
|
|
|
// TaskLogMetadata contains metadata about task execution: identity
// (task/worker/volume), timing (start/end/duration, Unix seconds and
// milliseconds respectively), status and progress, plus the on-disk
// log file path and free-form CustomData key/value pairs.
type TaskLogMetadata struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	TaskId        string                 `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
	TaskType      string                 `protobuf:"bytes,2,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
	WorkerId      string                 `protobuf:"bytes,3,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"`
	StartTime     int64                  `protobuf:"varint,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	EndTime       int64                  `protobuf:"varint,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	DurationMs    int64                  `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
	Status        string                 `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
	Progress      float32                `protobuf:"fixed32,8,opt,name=progress,proto3" json:"progress,omitempty"`
	VolumeId      uint32                 `protobuf:"varint,9,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
	Server        string                 `protobuf:"bytes,10,opt,name=server,proto3" json:"server,omitempty"`
	Collection    string                 `protobuf:"bytes,11,opt,name=collection,proto3" json:"collection,omitempty"`
	LogFilePath   string                 `protobuf:"bytes,12,opt,name=log_file_path,json=logFilePath,proto3" json:"log_file_path,omitempty"`
	CreatedAt     int64                  `protobuf:"varint,13,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
	CustomData    map[string]string      `protobuf:"bytes,14,rep,name=custom_data,json=customData,proto3" json:"custom_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *TaskLogMetadata) Reset() {
	*x = TaskLogMetadata{}
	mi := &file_worker_proto_msgTypes[22]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TaskLogMetadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TaskLogMetadata as a protobuf message.
func (*TaskLogMetadata) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskLogMetadata) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[22]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskLogMetadata.ProtoReflect.Descriptor instead.
func (*TaskLogMetadata) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{22}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *TaskLogMetadata) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *TaskLogMetadata) GetTaskType() string {
	if x != nil {
		return x.TaskType
	}
	return ""
}

func (x *TaskLogMetadata) GetWorkerId() string {
	if x != nil {
		return x.WorkerId
	}
	return ""
}

func (x *TaskLogMetadata) GetStartTime() int64 {
	if x != nil {
		return x.StartTime
	}
	return 0
}

func (x *TaskLogMetadata) GetEndTime() int64 {
	if x != nil {
		return x.EndTime
	}
	return 0
}

func (x *TaskLogMetadata) GetDurationMs() int64 {
	if x != nil {
		return x.DurationMs
	}
	return 0
}

func (x *TaskLogMetadata) GetStatus() string {
	if x != nil {
		return x.Status
	}
	return ""
}

func (x *TaskLogMetadata) GetProgress() float32 {
	if x != nil {
		return x.Progress
	}
	return 0
}

func (x *TaskLogMetadata) GetVolumeId() uint32 {
	if x != nil {
		return x.VolumeId
	}
	return 0
}

func (x *TaskLogMetadata) GetServer() string {
	if x != nil {
		return x.Server
	}
	return ""
}

func (x *TaskLogMetadata) GetCollection() string {
	if x != nil {
		return x.Collection
	}
	return ""
}

func (x *TaskLogMetadata) GetLogFilePath() string {
	if x != nil {
		return x.LogFilePath
	}
	return ""
}

func (x *TaskLogMetadata) GetCreatedAt() int64 {
	if x != nil {
		return x.CreatedAt
	}
	return 0
}

func (x *TaskLogMetadata) GetCustomData() map[string]string {
	if x != nil {
		return x.CustomData
	}
	return nil
}
|
|
|
|
// TaskLogEntry represents a single log entry: a Unix timestamp, a
// level string, the message text, optional structured Fields, and the
// task's progress/status at the time the entry was written.
type TaskLogEntry struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Timestamp     int64                  `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	Level         string                 `protobuf:"bytes,2,opt,name=level,proto3" json:"level,omitempty"`
	Message       string                 `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
	Fields        map[string]string      `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	Progress      float32                `protobuf:"fixed32,5,opt,name=progress,proto3" json:"progress,omitempty"`
	Status        string                 `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *TaskLogEntry) Reset() {
	*x = TaskLogEntry{}
	mi := &file_worker_proto_msgTypes[23]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TaskLogEntry) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TaskLogEntry as a protobuf message.
func (*TaskLogEntry) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskLogEntry) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[23]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskLogEntry.ProtoReflect.Descriptor instead.
func (*TaskLogEntry) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{23}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *TaskLogEntry) GetTimestamp() int64 {
	if x != nil {
		return x.Timestamp
	}
	return 0
}

func (x *TaskLogEntry) GetLevel() string {
	if x != nil {
		return x.Level
	}
	return ""
}

func (x *TaskLogEntry) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

func (x *TaskLogEntry) GetFields() map[string]string {
	if x != nil {
		return x.Fields
	}
	return nil
}

func (x *TaskLogEntry) GetProgress() float32 {
	if x != nil {
		return x.Progress
	}
	return 0
}

func (x *TaskLogEntry) GetStatus() string {
	if x != nil {
		return x.Status
	}
	return ""
}
|
|
|
|
// MaintenanceConfig holds configuration for the maintenance system:
// global enable flag, interval/timeout/retention knobs (all in
// seconds), retry behavior, and a nested per-task-type Policy.
type MaintenanceConfig struct {
	state                  protoimpl.MessageState `protogen:"open.v1"`
	Enabled                bool                   `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	ScanIntervalSeconds    int32                  `protobuf:"varint,2,opt,name=scan_interval_seconds,json=scanIntervalSeconds,proto3" json:"scan_interval_seconds,omitempty"` // How often to scan for maintenance needs
	WorkerTimeoutSeconds   int32                  `protobuf:"varint,3,opt,name=worker_timeout_seconds,json=workerTimeoutSeconds,proto3" json:"worker_timeout_seconds,omitempty"` // Worker heartbeat timeout
	TaskTimeoutSeconds     int32                  `protobuf:"varint,4,opt,name=task_timeout_seconds,json=taskTimeoutSeconds,proto3" json:"task_timeout_seconds,omitempty"` // Individual task timeout
	RetryDelaySeconds      int32                  `protobuf:"varint,5,opt,name=retry_delay_seconds,json=retryDelaySeconds,proto3" json:"retry_delay_seconds,omitempty"` // Delay between retries
	MaxRetries             int32                  `protobuf:"varint,6,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` // Default max retries for tasks
	CleanupIntervalSeconds int32                  `protobuf:"varint,7,opt,name=cleanup_interval_seconds,json=cleanupIntervalSeconds,proto3" json:"cleanup_interval_seconds,omitempty"` // How often to clean up old tasks
	TaskRetentionSeconds   int32                  `protobuf:"varint,8,opt,name=task_retention_seconds,json=taskRetentionSeconds,proto3" json:"task_retention_seconds,omitempty"` // How long to keep completed/failed tasks
	Policy                 *MaintenancePolicy     `protobuf:"bytes,9,opt,name=policy,proto3" json:"policy,omitempty"`
	unknownFields          protoimpl.UnknownFields
	sizeCache              protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *MaintenanceConfig) Reset() {
	*x = MaintenanceConfig{}
	mi := &file_worker_proto_msgTypes[24]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *MaintenanceConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MaintenanceConfig as a protobuf message.
func (*MaintenanceConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *MaintenanceConfig) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[24]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MaintenanceConfig.ProtoReflect.Descriptor instead.
func (*MaintenanceConfig) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{24}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *MaintenanceConfig) GetEnabled() bool {
	if x != nil {
		return x.Enabled
	}
	return false
}

func (x *MaintenanceConfig) GetScanIntervalSeconds() int32 {
	if x != nil {
		return x.ScanIntervalSeconds
	}
	return 0
}

func (x *MaintenanceConfig) GetWorkerTimeoutSeconds() int32 {
	if x != nil {
		return x.WorkerTimeoutSeconds
	}
	return 0
}

func (x *MaintenanceConfig) GetTaskTimeoutSeconds() int32 {
	if x != nil {
		return x.TaskTimeoutSeconds
	}
	return 0
}

func (x *MaintenanceConfig) GetRetryDelaySeconds() int32 {
	if x != nil {
		return x.RetryDelaySeconds
	}
	return 0
}

func (x *MaintenanceConfig) GetMaxRetries() int32 {
	if x != nil {
		return x.MaxRetries
	}
	return 0
}

func (x *MaintenanceConfig) GetCleanupIntervalSeconds() int32 {
	if x != nil {
		return x.CleanupIntervalSeconds
	}
	return 0
}

func (x *MaintenanceConfig) GetTaskRetentionSeconds() int32 {
	if x != nil {
		return x.TaskRetentionSeconds
	}
	return 0
}

func (x *MaintenanceConfig) GetPolicy() *MaintenancePolicy {
	if x != nil {
		return x.Policy
	}
	return nil
}
|
|
|
|
// MaintenancePolicy defines policies for maintenance operations: a
// per-task-type policy map, a global concurrency cap, and default
// repeat/check intervals used when a task type does not specify its own.
type MaintenancePolicy struct {
	state                        protoimpl.MessageState `protogen:"open.v1"`
	TaskPolicies                 map[string]*TaskPolicy `protobuf:"bytes,1,rep,name=task_policies,json=taskPolicies,proto3" json:"task_policies,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Task type -> policy mapping
	GlobalMaxConcurrent          int32                  `protobuf:"varint,2,opt,name=global_max_concurrent,json=globalMaxConcurrent,proto3" json:"global_max_concurrent,omitempty"` // Overall limit across all task types
	DefaultRepeatIntervalSeconds int32                  `protobuf:"varint,3,opt,name=default_repeat_interval_seconds,json=defaultRepeatIntervalSeconds,proto3" json:"default_repeat_interval_seconds,omitempty"` // Default seconds if task doesn't specify
	DefaultCheckIntervalSeconds  int32                  `protobuf:"varint,4,opt,name=default_check_interval_seconds,json=defaultCheckIntervalSeconds,proto3" json:"default_check_interval_seconds,omitempty"` // Default seconds for periodic checks
	unknownFields                protoimpl.UnknownFields
	sizeCache                    protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *MaintenancePolicy) Reset() {
	*x = MaintenancePolicy{}
	mi := &file_worker_proto_msgTypes[25]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *MaintenancePolicy) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MaintenancePolicy as a protobuf message.
func (*MaintenancePolicy) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *MaintenancePolicy) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[25]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MaintenancePolicy.ProtoReflect.Descriptor instead.
func (*MaintenancePolicy) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{25}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *MaintenancePolicy) GetTaskPolicies() map[string]*TaskPolicy {
	if x != nil {
		return x.TaskPolicies
	}
	return nil
}

func (x *MaintenancePolicy) GetGlobalMaxConcurrent() int32 {
	if x != nil {
		return x.GlobalMaxConcurrent
	}
	return 0
}

func (x *MaintenancePolicy) GetDefaultRepeatIntervalSeconds() int32 {
	if x != nil {
		return x.DefaultRepeatIntervalSeconds
	}
	return 0
}

func (x *MaintenancePolicy) GetDefaultCheckIntervalSeconds() int32 {
	if x != nil {
		return x.DefaultCheckIntervalSeconds
	}
	return 0
}
|
|
|
|
// TaskPolicy represents configuration for a specific task type:
// enable flag, concurrency cap, repeat/check intervals, plus a typed
// per-task-type config carried in the TaskConfig oneof (exactly one of
// vacuum, erasure-coding, balance, or replication config may be set).
type TaskPolicy struct {
	state                 protoimpl.MessageState `protogen:"open.v1"`
	Enabled               bool                   `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	MaxConcurrent         int32                  `protobuf:"varint,2,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"`
	RepeatIntervalSeconds int32                  `protobuf:"varint,3,opt,name=repeat_interval_seconds,json=repeatIntervalSeconds,proto3" json:"repeat_interval_seconds,omitempty"` // Seconds to wait before repeating
	CheckIntervalSeconds  int32                  `protobuf:"varint,4,opt,name=check_interval_seconds,json=checkIntervalSeconds,proto3" json:"check_interval_seconds,omitempty"` // Seconds between checks
	// Typed task-specific configuration (replaces generic map)
	//
	// Types that are valid to be assigned to TaskConfig:
	//
	//	*TaskPolicy_VacuumConfig
	//	*TaskPolicy_ErasureCodingConfig
	//	*TaskPolicy_BalanceConfig
	//	*TaskPolicy_ReplicationConfig
	TaskConfig    isTaskPolicy_TaskConfig `protobuf_oneof:"task_config"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *TaskPolicy) Reset() {
	*x = TaskPolicy{}
	mi := &file_worker_proto_msgTypes[26]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TaskPolicy) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TaskPolicy as a protobuf message.
func (*TaskPolicy) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *TaskPolicy) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[26]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskPolicy.ProtoReflect.Descriptor instead.
func (*TaskPolicy) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{26}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *TaskPolicy) GetEnabled() bool {
	if x != nil {
		return x.Enabled
	}
	return false
}

func (x *TaskPolicy) GetMaxConcurrent() int32 {
	if x != nil {
		return x.MaxConcurrent
	}
	return 0
}

func (x *TaskPolicy) GetRepeatIntervalSeconds() int32 {
	if x != nil {
		return x.RepeatIntervalSeconds
	}
	return 0
}

func (x *TaskPolicy) GetCheckIntervalSeconds() int32 {
	if x != nil {
		return x.CheckIntervalSeconds
	}
	return 0
}

// GetTaskConfig returns the raw oneof wrapper; callers normally use the
// typed accessors below instead.
func (x *TaskPolicy) GetTaskConfig() isTaskPolicy_TaskConfig {
	if x != nil {
		return x.TaskConfig
	}
	return nil
}

// Oneof accessors: each returns the embedded config when that oneof
// case is set, and nil otherwise.

func (x *TaskPolicy) GetVacuumConfig() *VacuumTaskConfig {
	if x != nil {
		if x, ok := x.TaskConfig.(*TaskPolicy_VacuumConfig); ok {
			return x.VacuumConfig
		}
	}
	return nil
}

func (x *TaskPolicy) GetErasureCodingConfig() *ErasureCodingTaskConfig {
	if x != nil {
		if x, ok := x.TaskConfig.(*TaskPolicy_ErasureCodingConfig); ok {
			return x.ErasureCodingConfig
		}
	}
	return nil
}

func (x *TaskPolicy) GetBalanceConfig() *BalanceTaskConfig {
	if x != nil {
		if x, ok := x.TaskConfig.(*TaskPolicy_BalanceConfig); ok {
			return x.BalanceConfig
		}
	}
	return nil
}

func (x *TaskPolicy) GetReplicationConfig() *ReplicationTaskConfig {
	if x != nil {
		if x, ok := x.TaskConfig.(*TaskPolicy_ReplicationConfig); ok {
			return x.ReplicationConfig
		}
	}
	return nil
}

// isTaskPolicy_TaskConfig is the sealed interface implemented by every
// task_config oneof wrapper type.
type isTaskPolicy_TaskConfig interface {
	isTaskPolicy_TaskConfig()
}

// Oneof wrapper types: each holds exactly one task_config case.

type TaskPolicy_VacuumConfig struct {
	VacuumConfig *VacuumTaskConfig `protobuf:"bytes,5,opt,name=vacuum_config,json=vacuumConfig,proto3,oneof"`
}

type TaskPolicy_ErasureCodingConfig struct {
	ErasureCodingConfig *ErasureCodingTaskConfig `protobuf:"bytes,6,opt,name=erasure_coding_config,json=erasureCodingConfig,proto3,oneof"`
}

type TaskPolicy_BalanceConfig struct {
	BalanceConfig *BalanceTaskConfig `protobuf:"bytes,7,opt,name=balance_config,json=balanceConfig,proto3,oneof"`
}

type TaskPolicy_ReplicationConfig struct {
	ReplicationConfig *ReplicationTaskConfig `protobuf:"bytes,8,opt,name=replication_config,json=replicationConfig,proto3,oneof"`
}

func (*TaskPolicy_VacuumConfig) isTaskPolicy_TaskConfig() {}

func (*TaskPolicy_ErasureCodingConfig) isTaskPolicy_TaskConfig() {}

func (*TaskPolicy_BalanceConfig) isTaskPolicy_TaskConfig() {}

func (*TaskPolicy_ReplicationConfig) isTaskPolicy_TaskConfig() {}
|
|
|
|
// VacuumTaskConfig contains vacuum-specific configuration: the garbage
// ratio that triggers a vacuum plus minimum-age and minimum-interval
// guards against vacuuming the same volume too often.
type VacuumTaskConfig struct {
	state              protoimpl.MessageState `protogen:"open.v1"`
	GarbageThreshold   float64                `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum (0.0-1.0)
	MinVolumeAgeHours  int32                  `protobuf:"varint,2,opt,name=min_volume_age_hours,json=minVolumeAgeHours,proto3" json:"min_volume_age_hours,omitempty"` // Minimum age before vacuum is considered
	MinIntervalSeconds int32                  `protobuf:"varint,3,opt,name=min_interval_seconds,json=minIntervalSeconds,proto3" json:"min_interval_seconds,omitempty"` // Minimum time between vacuum operations on the same volume
	unknownFields      protoimpl.UnknownFields
	sizeCache          protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *VacuumTaskConfig) Reset() {
	*x = VacuumTaskConfig{}
	mi := &file_worker_proto_msgTypes[27]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *VacuumTaskConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks VacuumTaskConfig as a protobuf message.
func (*VacuumTaskConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *VacuumTaskConfig) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[27]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use VacuumTaskConfig.ProtoReflect.Descriptor instead.
func (*VacuumTaskConfig) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{27}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *VacuumTaskConfig) GetGarbageThreshold() float64 {
	if x != nil {
		return x.GarbageThreshold
	}
	return 0
}

func (x *VacuumTaskConfig) GetMinVolumeAgeHours() int32 {
	if x != nil {
		return x.MinVolumeAgeHours
	}
	return 0
}

func (x *VacuumTaskConfig) GetMinIntervalSeconds() int32 {
	if x != nil {
		return x.MinIntervalSeconds
	}
	return 0
}
|
|
|
|
// ErasureCodingTaskConfig contains EC-specific configuration: fullness
// and minimum-size thresholds for converting a volume to erasure
// coding, a quiet-period requirement, and an optional collection filter.
type ErasureCodingTaskConfig struct {
	state            protoimpl.MessageState `protogen:"open.v1"`
	FullnessRatio    float64                `protobuf:"fixed64,1,opt,name=fullness_ratio,json=fullnessRatio,proto3" json:"fullness_ratio,omitempty"` // Minimum fullness ratio to trigger EC (0.0-1.0)
	QuietForSeconds  int32                  `protobuf:"varint,2,opt,name=quiet_for_seconds,json=quietForSeconds,proto3" json:"quiet_for_seconds,omitempty"` // Minimum quiet time before EC
	MinVolumeSizeMb  int32                  `protobuf:"varint,3,opt,name=min_volume_size_mb,json=minVolumeSizeMb,proto3" json:"min_volume_size_mb,omitempty"` // Minimum volume size for EC
	CollectionFilter string                 `protobuf:"bytes,4,opt,name=collection_filter,json=collectionFilter,proto3" json:"collection_filter,omitempty"` // Only process volumes from specific collections
	unknownFields    protoimpl.UnknownFields
	sizeCache        protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *ErasureCodingTaskConfig) Reset() {
	*x = ErasureCodingTaskConfig{}
	mi := &file_worker_proto_msgTypes[28]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ErasureCodingTaskConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ErasureCodingTaskConfig as a protobuf message.
func (*ErasureCodingTaskConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *ErasureCodingTaskConfig) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[28]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ErasureCodingTaskConfig.ProtoReflect.Descriptor instead.
func (*ErasureCodingTaskConfig) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{28}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *ErasureCodingTaskConfig) GetFullnessRatio() float64 {
	if x != nil {
		return x.FullnessRatio
	}
	return 0
}

func (x *ErasureCodingTaskConfig) GetQuietForSeconds() int32 {
	if x != nil {
		return x.QuietForSeconds
	}
	return 0
}

func (x *ErasureCodingTaskConfig) GetMinVolumeSizeMb() int32 {
	if x != nil {
		return x.MinVolumeSizeMb
	}
	return 0
}

func (x *ErasureCodingTaskConfig) GetCollectionFilter() string {
	if x != nil {
		return x.CollectionFilter
	}
	return ""
}
|
|
|
|
// BalanceTaskConfig contains balance-specific configuration: the
// imbalance ratio that triggers rebalancing and the minimum server
// count required before balancing is attempted.
type BalanceTaskConfig struct {
	state              protoimpl.MessageState `protogen:"open.v1"`
	ImbalanceThreshold float64                `protobuf:"fixed64,1,opt,name=imbalance_threshold,json=imbalanceThreshold,proto3" json:"imbalance_threshold,omitempty"` // Threshold for triggering rebalancing (0.0-1.0)
	MinServerCount     int32                  `protobuf:"varint,2,opt,name=min_server_count,json=minServerCount,proto3" json:"min_server_count,omitempty"` // Minimum number of servers required for balancing
	unknownFields      protoimpl.UnknownFields
	sizeCache          protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *BalanceTaskConfig) Reset() {
	*x = BalanceTaskConfig{}
	mi := &file_worker_proto_msgTypes[29]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *BalanceTaskConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks BalanceTaskConfig as a protobuf message.
func (*BalanceTaskConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *BalanceTaskConfig) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[29]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BalanceTaskConfig.ProtoReflect.Descriptor instead.
func (*BalanceTaskConfig) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{29}
}

// Nil-safe field accessors: each returns the stored field value, or the
// field's zero value when the receiver is nil.

func (x *BalanceTaskConfig) GetImbalanceThreshold() float64 {
	if x != nil {
		return x.ImbalanceThreshold
	}
	return 0
}

func (x *BalanceTaskConfig) GetMinServerCount() int32 {
	if x != nil {
		return x.MinServerCount
	}
	return 0
}
|
|
|
|
// ReplicationTaskConfig contains replication-specific configuration:
// currently just the target number of replicas.
type ReplicationTaskConfig struct {
	state              protoimpl.MessageState `protogen:"open.v1"`
	TargetReplicaCount int32                  `protobuf:"varint,1,opt,name=target_replica_count,json=targetReplicaCount,proto3" json:"target_replica_count,omitempty"` // Target number of replicas
	unknownFields      protoimpl.UnknownFields
	sizeCache          protoimpl.SizeCache
}

// Reset restores x to the zero value and re-registers its cached
// protobuf message state.
func (x *ReplicationTaskConfig) Reset() {
	*x = ReplicationTaskConfig{}
	mi := &file_worker_proto_msgTypes[30]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ReplicationTaskConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ReplicationTaskConfig as a protobuf message.
func (*ReplicationTaskConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing the cached message info on first use.
func (x *ReplicationTaskConfig) ProtoReflect() protoreflect.Message {
	mi := &file_worker_proto_msgTypes[30]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReplicationTaskConfig.ProtoReflect.Descriptor instead.
func (*ReplicationTaskConfig) Descriptor() ([]byte, []int) {
	return file_worker_proto_rawDescGZIP(), []int{30}
}

// GetTargetReplicaCount returns the target replica count, or 0 when the
// receiver is nil.
func (x *ReplicationTaskConfig) GetTargetReplicaCount() int32 {
	if x != nil {
		return x.TargetReplicaCount
	}
	return 0
}
|
|
|
|
// File_worker_proto holds the compiled file descriptor for worker.proto,
// populated during package initialization.
var File_worker_proto protoreflect.FileDescriptor
|
|
|
|
// file_worker_proto_rawDesc is the serialized FileDescriptorProto for
// worker.proto as emitted by protoc-gen-go. It is protobuf wire data,
// not human-readable text — do not edit by hand; regenerate from the
// .proto source instead.
const file_worker_proto_rawDesc = "" +
	"\n" +
	"\fworker.proto\x12\tworker_pb\"\x90\x04\n" +
	"\rWorkerMessage\x12\x1b\n" +
	"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x1c\n" +
	"\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12C\n" +
	"\fregistration\x18\x03 \x01(\v2\x1d.worker_pb.WorkerRegistrationH\x00R\fregistration\x12:\n" +
	"\theartbeat\x18\x04 \x01(\v2\x1a.worker_pb.WorkerHeartbeatH\x00R\theartbeat\x12;\n" +
	"\ftask_request\x18\x05 \x01(\v2\x16.worker_pb.TaskRequestH\x00R\vtaskRequest\x128\n" +
	"\vtask_update\x18\x06 \x01(\v2\x15.worker_pb.TaskUpdateH\x00R\n" +
	"taskUpdate\x12>\n" +
	"\rtask_complete\x18\a \x01(\v2\x17.worker_pb.TaskCompleteH\x00R\ftaskComplete\x127\n" +
	"\bshutdown\x18\b \x01(\v2\x19.worker_pb.WorkerShutdownH\x00R\bshutdown\x12H\n" +
	"\x11task_log_response\x18\t \x01(\v2\x1a.worker_pb.TaskLogResponseH\x00R\x0ftaskLogResponseB\t\n" +
	"\amessage\"\x95\x04\n" +
	"\fAdminMessage\x12\x19\n" +
	"\badmin_id\x18\x01 \x01(\tR\aadminId\x12\x1c\n" +
	"\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12V\n" +
	"\x15registration_response\x18\x03 \x01(\v2\x1f.worker_pb.RegistrationResponseH\x00R\x14registrationResponse\x12M\n" +
	"\x12heartbeat_response\x18\x04 \x01(\v2\x1c.worker_pb.HeartbeatResponseH\x00R\x11heartbeatResponse\x12D\n" +
	"\x0ftask_assignment\x18\x05 \x01(\v2\x19.worker_pb.TaskAssignmentH\x00R\x0etaskAssignment\x12J\n" +
	"\x11task_cancellation\x18\x06 \x01(\v2\x1b.worker_pb.TaskCancellationH\x00R\x10taskCancellation\x12A\n" +
	"\x0eadmin_shutdown\x18\a \x01(\v2\x18.worker_pb.AdminShutdownH\x00R\radminShutdown\x12E\n" +
	"\x10task_log_request\x18\b \x01(\v2\x19.worker_pb.TaskLogRequestH\x00R\x0etaskLogRequestB\t\n" +
	"\amessage\"\x9c\x02\n" +
	"\x12WorkerRegistration\x12\x1b\n" +
	"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x18\n" +
	"\aaddress\x18\x02 \x01(\tR\aaddress\x12\"\n" +
	"\fcapabilities\x18\x03 \x03(\tR\fcapabilities\x12%\n" +
	"\x0emax_concurrent\x18\x04 \x01(\x05R\rmaxConcurrent\x12G\n" +
	"\bmetadata\x18\x05 \x03(\v2+.worker_pb.WorkerRegistration.MetadataEntryR\bmetadata\x1a;\n" +
	"\rMetadataEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"x\n" +
	"\x14RegistrationResponse\x12\x18\n" +
	"\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" +
	"\amessage\x18\x02 \x01(\tR\amessage\x12,\n" +
	"\x12assigned_worker_id\x18\x03 \x01(\tR\x10assignedWorkerId\"\xad\x02\n" +
	"\x0fWorkerHeartbeat\x12\x1b\n" +
	"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x16\n" +
	"\x06status\x18\x02 \x01(\tR\x06status\x12!\n" +
	"\fcurrent_load\x18\x03 \x01(\x05R\vcurrentLoad\x12%\n" +
	"\x0emax_concurrent\x18\x04 \x01(\x05R\rmaxConcurrent\x12(\n" +
	"\x10current_task_ids\x18\x05 \x03(\tR\x0ecurrentTaskIds\x12'\n" +
	"\x0ftasks_completed\x18\x06 \x01(\x05R\x0etasksCompleted\x12!\n" +
	"\ftasks_failed\x18\a \x01(\x05R\vtasksFailed\x12%\n" +
	"\x0euptime_seconds\x18\b \x01(\x03R\ruptimeSeconds\"G\n" +
	"\x11HeartbeatResponse\x12\x18\n" +
	"\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" +
	"\amessage\x18\x02 \x01(\tR\amessage\"w\n" +
	"\vTaskRequest\x12\x1b\n" +
	"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\"\n" +
	"\fcapabilities\x18\x02 \x03(\tR\fcapabilities\x12'\n" +
	"\x0favailable_slots\x18\x03 \x01(\x05R\x0eavailableSlots\"\xb6\x02\n" +
	"\x0eTaskAssignment\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
	"\ttask_type\x18\x02 \x01(\tR\btaskType\x12-\n" +
	"\x06params\x18\x03 \x01(\v2\x15.worker_pb.TaskParamsR\x06params\x12\x1a\n" +
	"\bpriority\x18\x04 \x01(\x05R\bpriority\x12!\n" +
	"\fcreated_time\x18\x05 \x01(\x03R\vcreatedTime\x12C\n" +
	"\bmetadata\x18\x06 \x03(\v2'.worker_pb.TaskAssignment.MetadataEntryR\bmetadata\x1a;\n" +
	"\rMetadataEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb3\x04\n" +
	"\n" +
	"TaskParams\x12\x17\n" +
	"\atask_id\x18\f \x01(\tR\x06taskId\x12\x1b\n" +
	"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x16\n" +
	"\x06server\x18\x02 \x01(\tR\x06server\x12\x1e\n" +
	"\n" +
	"collection\x18\x03 \x01(\tR\n" +
	"collection\x12\x1f\n" +
	"\vdata_center\x18\x04 \x01(\tR\n" +
	"dataCenter\x12\x12\n" +
	"\x04rack\x18\x05 \x01(\tR\x04rack\x12\x1a\n" +
	"\breplicas\x18\x06 \x03(\tR\breplicas\x12\x1f\n" +
	"\vvolume_size\x18\v \x01(\x04R\n" +
	"volumeSize\x12B\n" +
	"\rvacuum_params\x18\a \x01(\v2\x1b.worker_pb.VacuumTaskParamsH\x00R\fvacuumParams\x12X\n" +
	"\x15erasure_coding_params\x18\b \x01(\v2\".worker_pb.ErasureCodingTaskParamsH\x00R\x13erasureCodingParams\x12E\n" +
	"\x0ebalance_params\x18\t \x01(\v2\x1c.worker_pb.BalanceTaskParamsH\x00R\rbalanceParams\x12Q\n" +
	"\x12replication_params\x18\n" +
	" \x01(\v2 .worker_pb.ReplicationTaskParamsH\x00R\x11replicationParamsB\r\n" +
	"\vtask_params\"\xcb\x01\n" +
	"\x10VacuumTaskParams\x12+\n" +
	"\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12!\n" +
	"\fforce_vacuum\x18\x02 \x01(\bR\vforceVacuum\x12\x1d\n" +
	"\n" +
	"batch_size\x18\x03 \x01(\x05R\tbatchSize\x12\x1f\n" +
	"\vworking_dir\x18\x04 \x01(\tR\n" +
	"workingDir\x12'\n" +
	"\x0fverify_checksum\x18\x05 \x01(\bR\x0everifyChecksum\"\xcb\x03\n" +
	"\x17ErasureCodingTaskParams\x120\n" +
	"\x14estimated_shard_size\x18\x03 \x01(\x04R\x12estimatedShardSize\x12\x1f\n" +
	"\vdata_shards\x18\x04 \x01(\x05R\n" +
	"dataShards\x12#\n" +
	"\rparity_shards\x18\x05 \x01(\x05R\fparityShards\x12\x1f\n" +
	"\vworking_dir\x18\x06 \x01(\tR\n" +
	"workingDir\x12#\n" +
	"\rmaster_client\x18\a \x01(\tR\fmasterClient\x12%\n" +
	"\x0ecleanup_source\x18\b \x01(\bR\rcleanupSource\x12/\n" +
	"\x13placement_conflicts\x18\t \x03(\tR\x12placementConflicts\x12<\n" +
	"\fdestinations\x18\n" +
	" \x03(\v2\x18.worker_pb.ECDestinationR\fdestinations\x12\\\n" +
	"\x18existing_shard_locations\x18\v \x03(\v2\".worker_pb.ExistingECShardLocationR\x16existingShardLocations\"\x9a\x01\n" +
	"\rECDestination\x12\x12\n" +
	"\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" +
	"\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" +
	"\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" +
	"\vdata_center\x18\x04 \x01(\tR\n" +
	"dataCenter\x12'\n" +
	"\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\"J\n" +
	"\x17ExistingECShardLocation\x12\x12\n" +
	"\x04node\x18\x01 \x01(\tR\x04node\x12\x1b\n" +
	"\tshard_ids\x18\x02 \x03(\rR\bshardIds\"\xaf\x02\n" +
	"\x11BalanceTaskParams\x12\x1b\n" +
	"\tdest_node\x18\x01 \x01(\tR\bdestNode\x12%\n" +
	"\x0eestimated_size\x18\x02 \x01(\x04R\restimatedSize\x12\x1b\n" +
	"\tdest_rack\x18\x03 \x01(\tR\bdestRack\x12\x17\n" +
	"\adest_dc\x18\x04 \x01(\tR\x06destDc\x12'\n" +
	"\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\x12/\n" +
	"\x13placement_conflicts\x18\x06 \x03(\tR\x12placementConflicts\x12\x1d\n" +
	"\n" +
	"force_move\x18\a \x01(\bR\tforceMove\x12'\n" +
	"\x0ftimeout_seconds\x18\b \x01(\x05R\x0etimeoutSeconds\"\xbf\x02\n" +
	"\x15ReplicationTaskParams\x12\x1b\n" +
	"\tdest_node\x18\x01 \x01(\tR\bdestNode\x12%\n" +
	"\x0eestimated_size\x18\x02 \x01(\x04R\restimatedSize\x12\x1b\n" +
	"\tdest_rack\x18\x03 \x01(\tR\bdestRack\x12\x17\n" +
	"\adest_dc\x18\x04 \x01(\tR\x06destDc\x12'\n" +
	"\x0fplacement_score\x18\x05 \x01(\x01R\x0eplacementScore\x12/\n" +
	"\x13placement_conflicts\x18\x06 \x03(\tR\x12placementConflicts\x12#\n" +
	"\rreplica_count\x18\a \x01(\x05R\freplicaCount\x12-\n" +
	"\x12verify_consistency\x18\b \x01(\bR\x11verifyConsistency\"\x8e\x02\n" +
	"\n" +
	"TaskUpdate\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
	"\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x16\n" +
	"\x06status\x18\x03 \x01(\tR\x06status\x12\x1a\n" +
	"\bprogress\x18\x04 \x01(\x02R\bprogress\x12\x18\n" +
	"\amessage\x18\x05 \x01(\tR\amessage\x12?\n" +
	"\bmetadata\x18\x06 \x03(\v2#.worker_pb.TaskUpdate.MetadataEntryR\bmetadata\x1a;\n" +
	"\rMetadataEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc5\x02\n" +
	"\fTaskComplete\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
	"\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x18\n" +
	"\asuccess\x18\x03 \x01(\bR\asuccess\x12#\n" +
	"\rerror_message\x18\x04 \x01(\tR\ferrorMessage\x12'\n" +
	"\x0fcompletion_time\x18\x05 \x01(\x03R\x0ecompletionTime\x12T\n" +
	"\x0fresult_metadata\x18\x06 \x03(\v2+.worker_pb.TaskComplete.ResultMetadataEntryR\x0eresultMetadata\x1aA\n" +
	"\x13ResultMetadataEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"Y\n" +
	"\x10TaskCancellation\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x16\n" +
	"\x06reason\x18\x02 \x01(\tR\x06reason\x12\x14\n" +
	"\x05force\x18\x03 \x01(\bR\x05force\"o\n" +
	"\x0eWorkerShutdown\x12\x1b\n" +
	"\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x16\n" +
	"\x06reason\x18\x02 \x01(\tR\x06reason\x12(\n" +
	"\x10pending_task_ids\x18\x03 \x03(\tR\x0ependingTaskIds\"c\n" +
	"\rAdminShutdown\x12\x16\n" +
	"\x06reason\x18\x01 \x01(\tR\x06reason\x12:\n" +
	"\x19graceful_shutdown_seconds\x18\x02 \x01(\x05R\x17gracefulShutdownSeconds\"\xe9\x01\n" +
	"\x0eTaskLogRequest\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
	"\tworker_id\x18\x02 \x01(\tR\bworkerId\x12)\n" +
	"\x10include_metadata\x18\x03 \x01(\bR\x0fincludeMetadata\x12\x1f\n" +
	"\vmax_entries\x18\x04 \x01(\x05R\n" +
	"maxEntries\x12\x1b\n" +
	"\tlog_level\x18\x05 \x01(\tR\blogLevel\x12\x1d\n" +
	"\n" +
	"start_time\x18\x06 \x01(\x03R\tstartTime\x12\x19\n" +
	"\bend_time\x18\a \x01(\x03R\aendTime\"\xf8\x01\n" +
	"\x0fTaskLogResponse\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
	"\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x18\n" +
	"\asuccess\x18\x03 \x01(\bR\asuccess\x12#\n" +
	"\rerror_message\x18\x04 \x01(\tR\ferrorMessage\x126\n" +
	"\bmetadata\x18\x05 \x01(\v2\x1a.worker_pb.TaskLogMetadataR\bmetadata\x128\n" +
	"\vlog_entries\x18\x06 \x03(\v2\x17.worker_pb.TaskLogEntryR\n" +
	"logEntries\"\x97\x04\n" +
	"\x0fTaskLogMetadata\x12\x17\n" +
	"\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" +
	"\ttask_type\x18\x02 \x01(\tR\btaskType\x12\x1b\n" +
	"\tworker_id\x18\x03 \x01(\tR\bworkerId\x12\x1d\n" +
	"\n" +
	"start_time\x18\x04 \x01(\x03R\tstartTime\x12\x19\n" +
	"\bend_time\x18\x05 \x01(\x03R\aendTime\x12\x1f\n" +
	"\vduration_ms\x18\x06 \x01(\x03R\n" +
	"durationMs\x12\x16\n" +
	"\x06status\x18\a \x01(\tR\x06status\x12\x1a\n" +
	"\bprogress\x18\b \x01(\x02R\bprogress\x12\x1b\n" +
	"\tvolume_id\x18\t \x01(\rR\bvolumeId\x12\x16\n" +
	"\x06server\x18\n" +
	" \x01(\tR\x06server\x12\x1e\n" +
	"\n" +
	"collection\x18\v \x01(\tR\n" +
	"collection\x12\"\n" +
	"\rlog_file_path\x18\f \x01(\tR\vlogFilePath\x12\x1d\n" +
	"\n" +
	"created_at\x18\r \x01(\x03R\tcreatedAt\x12K\n" +
	"\vcustom_data\x18\x0e \x03(\v2*.worker_pb.TaskLogMetadata.CustomDataEntryR\n" +
	"customData\x1a=\n" +
	"\x0fCustomDataEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x88\x02\n" +
	"\fTaskLogEntry\x12\x1c\n" +
	"\ttimestamp\x18\x01 \x01(\x03R\ttimestamp\x12\x14\n" +
	"\x05level\x18\x02 \x01(\tR\x05level\x12\x18\n" +
	"\amessage\x18\x03 \x01(\tR\amessage\x12;\n" +
	"\x06fields\x18\x04 \x03(\v2#.worker_pb.TaskLogEntry.FieldsEntryR\x06fields\x12\x1a\n" +
	"\bprogress\x18\x05 \x01(\x02R\bprogress\x12\x16\n" +
	"\x06status\x18\x06 \x01(\tR\x06status\x1a9\n" +
	"\vFieldsEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc0\x03\n" +
	"\x11MaintenanceConfig\x12\x18\n" +
	"\aenabled\x18\x01 \x01(\bR\aenabled\x122\n" +
	"\x15scan_interval_seconds\x18\x02 \x01(\x05R\x13scanIntervalSeconds\x124\n" +
	"\x16worker_timeout_seconds\x18\x03 \x01(\x05R\x14workerTimeoutSeconds\x120\n" +
	"\x14task_timeout_seconds\x18\x04 \x01(\x05R\x12taskTimeoutSeconds\x12.\n" +
	"\x13retry_delay_seconds\x18\x05 \x01(\x05R\x11retryDelaySeconds\x12\x1f\n" +
	"\vmax_retries\x18\x06 \x01(\x05R\n" +
	"maxRetries\x128\n" +
	"\x18cleanup_interval_seconds\x18\a \x01(\x05R\x16cleanupIntervalSeconds\x124\n" +
	"\x16task_retention_seconds\x18\b \x01(\x05R\x14taskRetentionSeconds\x124\n" +
	"\x06policy\x18\t \x01(\v2\x1c.worker_pb.MaintenancePolicyR\x06policy\"\x80\x03\n" +
	"\x11MaintenancePolicy\x12S\n" +
	"\rtask_policies\x18\x01 \x03(\v2..worker_pb.MaintenancePolicy.TaskPoliciesEntryR\ftaskPolicies\x122\n" +
	"\x15global_max_concurrent\x18\x02 \x01(\x05R\x13globalMaxConcurrent\x12E\n" +
	"\x1fdefault_repeat_interval_seconds\x18\x03 \x01(\x05R\x1cdefaultRepeatIntervalSeconds\x12C\n" +
	"\x1edefault_check_interval_seconds\x18\x04 \x01(\x05R\x1bdefaultCheckIntervalSeconds\x1aV\n" +
	"\x11TaskPoliciesEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12+\n" +
	"\x05value\x18\x02 \x01(\v2\x15.worker_pb.TaskPolicyR\x05value:\x028\x01\"\x82\x04\n" +
	"\n" +
	"TaskPolicy\x12\x18\n" +
	"\aenabled\x18\x01 \x01(\bR\aenabled\x12%\n" +
	"\x0emax_concurrent\x18\x02 \x01(\x05R\rmaxConcurrent\x126\n" +
	"\x17repeat_interval_seconds\x18\x03 \x01(\x05R\x15repeatIntervalSeconds\x124\n" +
	"\x16check_interval_seconds\x18\x04 \x01(\x05R\x14checkIntervalSeconds\x12B\n" +
	"\rvacuum_config\x18\x05 \x01(\v2\x1b.worker_pb.VacuumTaskConfigH\x00R\fvacuumConfig\x12X\n" +
	"\x15erasure_coding_config\x18\x06 \x01(\v2\".worker_pb.ErasureCodingTaskConfigH\x00R\x13erasureCodingConfig\x12E\n" +
	"\x0ebalance_config\x18\a \x01(\v2\x1c.worker_pb.BalanceTaskConfigH\x00R\rbalanceConfig\x12Q\n" +
	"\x12replication_config\x18\b \x01(\v2 .worker_pb.ReplicationTaskConfigH\x00R\x11replicationConfigB\r\n" +
	"\vtask_config\"\xa2\x01\n" +
	"\x10VacuumTaskConfig\x12+\n" +
	"\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12/\n" +
	"\x14min_volume_age_hours\x18\x02 \x01(\x05R\x11minVolumeAgeHours\x120\n" +
	"\x14min_interval_seconds\x18\x03 \x01(\x05R\x12minIntervalSeconds\"\xc6\x01\n" +
	"\x17ErasureCodingTaskConfig\x12%\n" +
	"\x0efullness_ratio\x18\x01 \x01(\x01R\rfullnessRatio\x12*\n" +
	"\x11quiet_for_seconds\x18\x02 \x01(\x05R\x0fquietForSeconds\x12+\n" +
	"\x12min_volume_size_mb\x18\x03 \x01(\x05R\x0fminVolumeSizeMb\x12+\n" +
	"\x11collection_filter\x18\x04 \x01(\tR\x10collectionFilter\"n\n" +
	"\x11BalanceTaskConfig\x12/\n" +
	"\x13imbalance_threshold\x18\x01 \x01(\x01R\x12imbalanceThreshold\x12(\n" +
	"\x10min_server_count\x18\x02 \x01(\x05R\x0eminServerCount\"I\n" +
	"\x15ReplicationTaskConfig\x120\n" +
	"\x14target_replica_count\x18\x01 \x01(\x05R\x12targetReplicaCount2V\n" +
	"\rWorkerService\x12E\n" +
	"\fWorkerStream\x12\x18.worker_pb.WorkerMessage\x1a\x17.worker_pb.AdminMessage(\x010\x01B2Z0github.com/seaweedfs/seaweedfs/weed/pb/worker_pbb\x06proto3"
|
|
|
|
// Lazy, once-only gzip cache for the raw descriptor bytes; written by
// file_worker_proto_rawDescGZIP.
var (
	file_worker_proto_rawDescOnce sync.Once
	file_worker_proto_rawDescData []byte
)
|
|
|
|
// file_worker_proto_rawDescGZIP compresses the raw descriptor exactly once
// (guarded by sync.Once) and returns the cached gzip bytes. The unsafe
// slice view avoids copying the constant string before compression.
func file_worker_proto_rawDescGZIP() []byte {
	file_worker_proto_rawDescOnce.Do(func() {
		file_worker_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc)))
	})
	return file_worker_proto_rawDescData
}
|
|
|
|
// file_worker_proto_msgTypes holds runtime message info for the 38 messages
// (including synthetic map-entry messages) declared in worker.proto.
var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 38)

// file_worker_proto_goTypes maps descriptor type indexes to Go types; the
// trailing nil entries are map-entry messages with no named Go type.
var file_worker_proto_goTypes = []any{
	(*WorkerMessage)(nil),           // 0: worker_pb.WorkerMessage
	(*AdminMessage)(nil),            // 1: worker_pb.AdminMessage
	(*WorkerRegistration)(nil),      // 2: worker_pb.WorkerRegistration
	(*RegistrationResponse)(nil),    // 3: worker_pb.RegistrationResponse
	(*WorkerHeartbeat)(nil),         // 4: worker_pb.WorkerHeartbeat
	(*HeartbeatResponse)(nil),       // 5: worker_pb.HeartbeatResponse
	(*TaskRequest)(nil),             // 6: worker_pb.TaskRequest
	(*TaskAssignment)(nil),          // 7: worker_pb.TaskAssignment
	(*TaskParams)(nil),              // 8: worker_pb.TaskParams
	(*VacuumTaskParams)(nil),        // 9: worker_pb.VacuumTaskParams
	(*ErasureCodingTaskParams)(nil), // 10: worker_pb.ErasureCodingTaskParams
	(*ECDestination)(nil),           // 11: worker_pb.ECDestination
	(*ExistingECShardLocation)(nil), // 12: worker_pb.ExistingECShardLocation
	(*BalanceTaskParams)(nil),       // 13: worker_pb.BalanceTaskParams
	(*ReplicationTaskParams)(nil),   // 14: worker_pb.ReplicationTaskParams
	(*TaskUpdate)(nil),              // 15: worker_pb.TaskUpdate
	(*TaskComplete)(nil),            // 16: worker_pb.TaskComplete
	(*TaskCancellation)(nil),        // 17: worker_pb.TaskCancellation
	(*WorkerShutdown)(nil),          // 18: worker_pb.WorkerShutdown
	(*AdminShutdown)(nil),           // 19: worker_pb.AdminShutdown
	(*TaskLogRequest)(nil),          // 20: worker_pb.TaskLogRequest
	(*TaskLogResponse)(nil),         // 21: worker_pb.TaskLogResponse
	(*TaskLogMetadata)(nil),         // 22: worker_pb.TaskLogMetadata
	(*TaskLogEntry)(nil),            // 23: worker_pb.TaskLogEntry
	(*MaintenanceConfig)(nil),       // 24: worker_pb.MaintenanceConfig
	(*MaintenancePolicy)(nil),       // 25: worker_pb.MaintenancePolicy
	(*TaskPolicy)(nil),              // 26: worker_pb.TaskPolicy
	(*VacuumTaskConfig)(nil),        // 27: worker_pb.VacuumTaskConfig
	(*ErasureCodingTaskConfig)(nil), // 28: worker_pb.ErasureCodingTaskConfig
	(*BalanceTaskConfig)(nil),       // 29: worker_pb.BalanceTaskConfig
	(*ReplicationTaskConfig)(nil),   // 30: worker_pb.ReplicationTaskConfig
	nil,                             // 31: worker_pb.WorkerRegistration.MetadataEntry
	nil,                             // 32: worker_pb.TaskAssignment.MetadataEntry
	nil,                             // 33: worker_pb.TaskUpdate.MetadataEntry
	nil,                             // 34: worker_pb.TaskComplete.ResultMetadataEntry
	nil,                             // 35: worker_pb.TaskLogMetadata.CustomDataEntry
	nil,                             // 36: worker_pb.TaskLogEntry.FieldsEntry
	nil,                             // 37: worker_pb.MaintenancePolicy.TaskPoliciesEntry
}
|
|
// file_worker_proto_depIdxs records, per field/method, the index into
// file_worker_proto_goTypes of the referenced type; the trailing entries
// partition the list into output/input/extension/field sub-ranges.
var file_worker_proto_depIdxs = []int32{
	2,  // 0: worker_pb.WorkerMessage.registration:type_name -> worker_pb.WorkerRegistration
	4,  // 1: worker_pb.WorkerMessage.heartbeat:type_name -> worker_pb.WorkerHeartbeat
	6,  // 2: worker_pb.WorkerMessage.task_request:type_name -> worker_pb.TaskRequest
	15, // 3: worker_pb.WorkerMessage.task_update:type_name -> worker_pb.TaskUpdate
	16, // 4: worker_pb.WorkerMessage.task_complete:type_name -> worker_pb.TaskComplete
	18, // 5: worker_pb.WorkerMessage.shutdown:type_name -> worker_pb.WorkerShutdown
	21, // 6: worker_pb.WorkerMessage.task_log_response:type_name -> worker_pb.TaskLogResponse
	3,  // 7: worker_pb.AdminMessage.registration_response:type_name -> worker_pb.RegistrationResponse
	5,  // 8: worker_pb.AdminMessage.heartbeat_response:type_name -> worker_pb.HeartbeatResponse
	7,  // 9: worker_pb.AdminMessage.task_assignment:type_name -> worker_pb.TaskAssignment
	17, // 10: worker_pb.AdminMessage.task_cancellation:type_name -> worker_pb.TaskCancellation
	19, // 11: worker_pb.AdminMessage.admin_shutdown:type_name -> worker_pb.AdminShutdown
	20, // 12: worker_pb.AdminMessage.task_log_request:type_name -> worker_pb.TaskLogRequest
	31, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
	8,  // 14: worker_pb.TaskAssignment.params:type_name -> worker_pb.TaskParams
	32, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry
	9,  // 16: worker_pb.TaskParams.vacuum_params:type_name -> worker_pb.VacuumTaskParams
	10, // 17: worker_pb.TaskParams.erasure_coding_params:type_name -> worker_pb.ErasureCodingTaskParams
	13, // 18: worker_pb.TaskParams.balance_params:type_name -> worker_pb.BalanceTaskParams
	14, // 19: worker_pb.TaskParams.replication_params:type_name -> worker_pb.ReplicationTaskParams
	11, // 20: worker_pb.ErasureCodingTaskParams.destinations:type_name -> worker_pb.ECDestination
	12, // 21: worker_pb.ErasureCodingTaskParams.existing_shard_locations:type_name -> worker_pb.ExistingECShardLocation
	33, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry
	34, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry
	22, // 24: worker_pb.TaskLogResponse.metadata:type_name -> worker_pb.TaskLogMetadata
	23, // 25: worker_pb.TaskLogResponse.log_entries:type_name -> worker_pb.TaskLogEntry
	35, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry
	36, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry
	25, // 28: worker_pb.MaintenanceConfig.policy:type_name -> worker_pb.MaintenancePolicy
	37, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry
	27, // 30: worker_pb.TaskPolicy.vacuum_config:type_name -> worker_pb.VacuumTaskConfig
	28, // 31: worker_pb.TaskPolicy.erasure_coding_config:type_name -> worker_pb.ErasureCodingTaskConfig
	29, // 32: worker_pb.TaskPolicy.balance_config:type_name -> worker_pb.BalanceTaskConfig
	30, // 33: worker_pb.TaskPolicy.replication_config:type_name -> worker_pb.ReplicationTaskConfig
	26, // 34: worker_pb.MaintenancePolicy.TaskPoliciesEntry.value:type_name -> worker_pb.TaskPolicy
	0,  // 35: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage
	1,  // 36: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage
	36, // [36:37] is the sub-list for method output_type
	35, // [35:36] is the sub-list for method input_type
	35, // [35:35] is the sub-list for extension type_name
	35, // [35:35] is the sub-list for extension extendee
	0,  // [0:35] is the sub-list for field type_name
}
|
|
|
|
func init() { file_worker_proto_init() }

// file_worker_proto_init registers the oneof wrapper types and builds the
// file descriptor exactly once; a second call is a no-op because
// File_worker_proto is set at the end of the first call.
func file_worker_proto_init() {
	if File_worker_proto != nil {
		return
	}
	// Oneof wrappers for WorkerMessage.message (msgTypes[0]).
	file_worker_proto_msgTypes[0].OneofWrappers = []any{
		(*WorkerMessage_Registration)(nil),
		(*WorkerMessage_Heartbeat)(nil),
		(*WorkerMessage_TaskRequest)(nil),
		(*WorkerMessage_TaskUpdate)(nil),
		(*WorkerMessage_TaskComplete)(nil),
		(*WorkerMessage_Shutdown)(nil),
		(*WorkerMessage_TaskLogResponse)(nil),
	}
	// Oneof wrappers for AdminMessage.message (msgTypes[1]).
	file_worker_proto_msgTypes[1].OneofWrappers = []any{
		(*AdminMessage_RegistrationResponse)(nil),
		(*AdminMessage_HeartbeatResponse)(nil),
		(*AdminMessage_TaskAssignment)(nil),
		(*AdminMessage_TaskCancellation)(nil),
		(*AdminMessage_AdminShutdown)(nil),
		(*AdminMessage_TaskLogRequest)(nil),
	}
	// Oneof wrappers for TaskParams.task_params (msgTypes[8]).
	file_worker_proto_msgTypes[8].OneofWrappers = []any{
		(*TaskParams_VacuumParams)(nil),
		(*TaskParams_ErasureCodingParams)(nil),
		(*TaskParams_BalanceParams)(nil),
		(*TaskParams_ReplicationParams)(nil),
	}
	// Oneof wrappers for TaskPolicy.task_config (msgTypes[26]).
	file_worker_proto_msgTypes[26].OneofWrappers = []any{
		(*TaskPolicy_VacuumConfig)(nil),
		(*TaskPolicy_ErasureCodingConfig)(nil),
		(*TaskPolicy_BalanceConfig)(nil),
		(*TaskPolicy_ReplicationConfig)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   38,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_worker_proto_goTypes,
		DependencyIndexes: file_worker_proto_depIdxs,
		MessageInfos:      file_worker_proto_msgTypes,
	}.Build()
	File_worker_proto = out.File
	// Release the construction-time tables; the built descriptor owns the data now.
	file_worker_proto_goTypes = nil
	file_worker_proto_depIdxs = nil
}
|