mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-11-24 08:46:54 +08:00
ec vacuum workflow is correct now
@@ -22,14 +22,12 @@ const (
ConfigSubdir = "conf"

// Configuration file names (protobuf binary)
MaintenanceConfigFile = "maintenance.pb"
ECTaskConfigFile = "task_erasure_coding.pb"
ReplicationTaskConfigFile = "task_replication.pb"
MaintenanceConfigFile = "maintenance.pb"
ECTaskConfigFile = "task_erasure_coding.pb"

// JSON reference files
MaintenanceConfigJSONFile = "maintenance.json"
ECTaskConfigJSONFile = "task_erasure_coding.json"
ReplicationTaskConfigJSONFile = "task_replication.json"
MaintenanceConfigJSONFile = "maintenance.json"
ECTaskConfigJSONFile = "task_erasure_coding.json"

// Task persistence subdirectories and settings
TasksSubdir = "tasks"
@@ -43,10 +41,8 @@ const (

// Task configuration types
type (
VacuumTaskConfig = worker_pb.VacuumTaskConfig
ErasureCodingTaskConfig = worker_pb.ErasureCodingTaskConfig
BalanceTaskConfig = worker_pb.BalanceTaskConfig
ReplicationTaskConfig = worker_pb.ReplicationTaskConfig
EcVacuumTaskConfig = worker_pb.EcVacuumTaskConfig
)

// isValidTaskID validates that a task ID is safe for use in file paths
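The aliases above re-export the worker_pb task configuration messages. As a quick illustration of the EC vacuum one (its fields appear in the generated code later in this diff), a minimal sketch with made-up values:

package example

import "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"

// defaultEcVacuumConfig is illustrative only: the field names come from
// worker_pb.EcVacuumTaskConfig as shown in this diff, but the values are
// arbitrary examples, not defaults introduced by this commit.
func defaultEcVacuumConfig() *worker_pb.EcVacuumTaskConfig {
	return &worker_pb.EcVacuumTaskConfig{
		DeletionThreshold:   0.3,  // vacuum once ~30% of entries are deleted
		MinVolumeAgeSeconds: 3600, // leave very young EC volumes alone
	}
}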
@@ -345,27 +341,6 @@ func (cp *ConfigPersistence) LoadErasureCodingTaskPolicy() (*worker_pb.TaskPolic
return nil, fmt.Errorf("failed to unmarshal EC task configuration")
}

// SaveReplicationTaskConfig saves replication task configuration to protobuf file
func (cp *ConfigPersistence) SaveReplicationTaskConfig(config *ReplicationTaskConfig) error {
return cp.saveTaskConfig(ReplicationTaskConfigFile, config)
}

// LoadReplicationTaskConfig loads replication task configuration from protobuf file
func (cp *ConfigPersistence) LoadReplicationTaskConfig() (*ReplicationTaskConfig, error) {
var config ReplicationTaskConfig
err := cp.loadTaskConfig(ReplicationTaskConfigFile, &config)
if err != nil {
// Return default config if file doesn't exist
if os.IsNotExist(err) {
return &ReplicationTaskConfig{
TargetReplicaCount: 1,
}, nil
}
return nil, err
}
return &config, nil
}

// saveTaskConfig is a generic helper for saving task configurations with both protobuf and JSON reference
func (cp *ConfigPersistence) saveTaskConfig(filename string, config proto.Message) error {
if cp.dataDir == "" {
@@ -306,6 +306,7 @@ message ReceiveFileInfo {
bool is_ec_volume = 4;
uint32 shard_id = 5;
uint64 file_size = 6;
uint32 generation = 7; // generation for EC volume file naming, defaults to 0
}

message ReceiveFileResponse {

@@ -2064,6 +2064,7 @@ type ReceiveFileInfo struct {
IsEcVolume bool `protobuf:"varint,4,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"`
ShardId uint32 `protobuf:"varint,5,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
FileSize uint64 `protobuf:"varint,6,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
Generation uint32 `protobuf:"varint,7,opt,name=generation,proto3" json:"generation,omitempty"` // generation for EC volume file naming, defaults to 0
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -2140,6 +2141,13 @@ func (x *ReceiveFileInfo) GetFileSize() uint64 {
return 0
}

func (x *ReceiveFileInfo) GetGeneration() uint32 {
if x != nil {
return x.Generation
}
return 0
}

type ReceiveFileResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
BytesWritten uint64 `protobuf:"varint,1,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"`
@@ -6415,7 +6423,7 @@ const file_volume_server_proto_rawDesc = "" +
"\x12ReceiveFileRequest\x127\n" +
"\x04info\x18\x01 \x01(\v2!.volume_server_pb.ReceiveFileInfoH\x00R\x04info\x12#\n" +
"\ffile_content\x18\x02 \x01(\fH\x00R\vfileContentB\x06\n" +
"\x04data\"\xba\x01\n" +
"\x04data\"\xda\x01\n" +
"\x0fReceiveFileInfo\x12\x1b\n" +
"\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x10\n" +
"\x03ext\x18\x02 \x01(\tR\x03ext\x12\x1e\n" +
@@ -6425,7 +6433,10 @@ const file_volume_server_proto_rawDesc = "" +
"\fis_ec_volume\x18\x04 \x01(\bR\n" +
"isEcVolume\x12\x19\n" +
"\bshard_id\x18\x05 \x01(\rR\ashardId\x12\x1b\n" +
"\tfile_size\x18\x06 \x01(\x04R\bfileSize\"P\n" +
"\tfile_size\x18\x06 \x01(\x04R\bfileSize\x12\x1e\n" +
"\n" +
"generation\x18\a \x01(\rR\n" +
"generation\"P\n" +
"\x13ReceiveFileResponse\x12#\n" +
"\rbytes_written\x18\x01 \x01(\x04R\fbytesWritten\x12\x14\n" +
"\x05error\x18\x02 \x01(\tR\x05error\"`\n" +
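For context on the new generation field: the sender includes it in the first ReceiveFileRequest message, and since it is a proto3 scalar it simply defaults to 0 for older senders. A minimal sketch of that first message (the same pattern appears in sendFileToVolumeServer further down in this commit); the stream argument and the concrete values are placeholders:

package example

import "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"

// sendFileHeader sends only the metadata message that precedes the file chunks.
// Illustrative sketch: field names match the generated code above, values are examples.
func sendFileHeader(stream volume_server_pb.VolumeServer_ReceiveFileClient) error {
	return stream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{
				VolumeId:   123,
				Ext:        ".ec03",
				Collection: "pictures",
				IsEcVolume: true,
				ShardId:    3,
				FileSize:   1 << 20,
				Generation: 2, // omitted by old clients, which yields the default 0
			},
		},
	})
}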
@@ -2489,67 +2489,6 @@ func (*TaskPolicy_ErasureCodingConfig) isTaskPolicy_TaskConfig() {}

func (*TaskPolicy_EcVacuumConfig) isTaskPolicy_TaskConfig() {}

// VacuumTaskConfig contains vacuum-specific configuration
type VacuumTaskConfig struct {
state protoimpl.MessageState `protogen:"open.v1"`
GarbageThreshold float64 `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum (0.0-1.0)
MinVolumeAgeHours int32 `protobuf:"varint,2,opt,name=min_volume_age_hours,json=minVolumeAgeHours,proto3" json:"min_volume_age_hours,omitempty"` // Minimum age before vacuum is considered
MinIntervalSeconds int32 `protobuf:"varint,3,opt,name=min_interval_seconds,json=minIntervalSeconds,proto3" json:"min_interval_seconds,omitempty"` // Minimum time between vacuum operations on the same volume
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *VacuumTaskConfig) Reset() {
*x = VacuumTaskConfig{}
mi := &file_worker_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *VacuumTaskConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*VacuumTaskConfig) ProtoMessage() {}

func (x *VacuumTaskConfig) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use VacuumTaskConfig.ProtoReflect.Descriptor instead.
func (*VacuumTaskConfig) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{27}
}

func (x *VacuumTaskConfig) GetGarbageThreshold() float64 {
if x != nil {
return x.GarbageThreshold
}
return 0
}

func (x *VacuumTaskConfig) GetMinVolumeAgeHours() int32 {
if x != nil {
return x.MinVolumeAgeHours
}
return 0
}

func (x *VacuumTaskConfig) GetMinIntervalSeconds() int32 {
if x != nil {
return x.MinIntervalSeconds
}
return 0
}

// ErasureCodingTaskConfig contains EC-specific configuration
type ErasureCodingTaskConfig struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -2563,7 +2502,7 @@ type ErasureCodingTaskConfig struct {

func (x *ErasureCodingTaskConfig) Reset() {
*x = ErasureCodingTaskConfig{}
mi := &file_worker_proto_msgTypes[28]
mi := &file_worker_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2575,7 +2514,7 @@ func (x *ErasureCodingTaskConfig) String() string {
func (*ErasureCodingTaskConfig) ProtoMessage() {}

func (x *ErasureCodingTaskConfig) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[28]
mi := &file_worker_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2588,7 +2527,7 @@ func (x *ErasureCodingTaskConfig) ProtoReflect() protoreflect.Message {

// Deprecated: Use ErasureCodingTaskConfig.ProtoReflect.Descriptor instead.
func (*ErasureCodingTaskConfig) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{28}
return file_worker_proto_rawDescGZIP(), []int{27}
}

func (x *ErasureCodingTaskConfig) GetFullnessRatio() float64 {
@@ -2619,104 +2558,6 @@ func (x *ErasureCodingTaskConfig) GetCollectionFilter() string {
return ""
}

// BalanceTaskConfig contains balance-specific configuration
type BalanceTaskConfig struct {
state protoimpl.MessageState `protogen:"open.v1"`
ImbalanceThreshold float64 `protobuf:"fixed64,1,opt,name=imbalance_threshold,json=imbalanceThreshold,proto3" json:"imbalance_threshold,omitempty"` // Threshold for triggering rebalancing (0.0-1.0)
MinServerCount int32 `protobuf:"varint,2,opt,name=min_server_count,json=minServerCount,proto3" json:"min_server_count,omitempty"` // Minimum number of servers required for balancing
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *BalanceTaskConfig) Reset() {
*x = BalanceTaskConfig{}
mi := &file_worker_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *BalanceTaskConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*BalanceTaskConfig) ProtoMessage() {}

func (x *BalanceTaskConfig) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use BalanceTaskConfig.ProtoReflect.Descriptor instead.
func (*BalanceTaskConfig) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{29}
}

func (x *BalanceTaskConfig) GetImbalanceThreshold() float64 {
if x != nil {
return x.ImbalanceThreshold
}
return 0
}

func (x *BalanceTaskConfig) GetMinServerCount() int32 {
if x != nil {
return x.MinServerCount
}
return 0
}

// ReplicationTaskConfig contains replication-specific configuration
type ReplicationTaskConfig struct {
state protoimpl.MessageState `protogen:"open.v1"`
TargetReplicaCount int32 `protobuf:"varint,1,opt,name=target_replica_count,json=targetReplicaCount,proto3" json:"target_replica_count,omitempty"` // Target number of replicas
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

func (x *ReplicationTaskConfig) Reset() {
*x = ReplicationTaskConfig{}
mi := &file_worker_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

func (x *ReplicationTaskConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*ReplicationTaskConfig) ProtoMessage() {}

func (x *ReplicationTaskConfig) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use ReplicationTaskConfig.ProtoReflect.Descriptor instead.
func (*ReplicationTaskConfig) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{30}
}

func (x *ReplicationTaskConfig) GetTargetReplicaCount() int32 {
if x != nil {
return x.TargetReplicaCount
}
return 0
}

// EcVacuumTaskConfig contains EC vacuum-specific configuration
type EcVacuumTaskConfig struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -2730,7 +2571,7 @@ type EcVacuumTaskConfig struct {

func (x *EcVacuumTaskConfig) Reset() {
*x = EcVacuumTaskConfig{}
mi := &file_worker_proto_msgTypes[31]
mi := &file_worker_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2742,7 +2583,7 @@ func (x *EcVacuumTaskConfig) String() string {
func (*EcVacuumTaskConfig) ProtoMessage() {}

func (x *EcVacuumTaskConfig) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[31]
mi := &file_worker_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2755,7 +2596,7 @@ func (x *EcVacuumTaskConfig) ProtoReflect() protoreflect.Message {

// Deprecated: Use EcVacuumTaskConfig.ProtoReflect.Descriptor instead.
func (*EcVacuumTaskConfig) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{31}
return file_worker_proto_rawDescGZIP(), []int{28}
}

func (x *EcVacuumTaskConfig) GetDeletionThreshold() float64 {
@@ -2820,7 +2661,7 @@ type MaintenanceTaskData struct {

func (x *MaintenanceTaskData) Reset() {
*x = MaintenanceTaskData{}
mi := &file_worker_proto_msgTypes[32]
mi := &file_worker_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2832,7 +2673,7 @@ func (x *MaintenanceTaskData) String() string {
func (*MaintenanceTaskData) ProtoMessage() {}

func (x *MaintenanceTaskData) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[32]
mi := &file_worker_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2845,7 +2686,7 @@ func (x *MaintenanceTaskData) ProtoReflect() protoreflect.Message {

// Deprecated: Use MaintenanceTaskData.ProtoReflect.Descriptor instead.
func (*MaintenanceTaskData) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{32}
return file_worker_proto_rawDescGZIP(), []int{29}
}

func (x *MaintenanceTaskData) GetId() string {
@@ -3030,7 +2871,7 @@ type TaskAssignmentRecord struct {

func (x *TaskAssignmentRecord) Reset() {
*x = TaskAssignmentRecord{}
mi := &file_worker_proto_msgTypes[33]
mi := &file_worker_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3042,7 +2883,7 @@ func (x *TaskAssignmentRecord) String() string {
func (*TaskAssignmentRecord) ProtoMessage() {}

func (x *TaskAssignmentRecord) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[33]
mi := &file_worker_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3055,7 +2896,7 @@ func (x *TaskAssignmentRecord) ProtoReflect() protoreflect.Message {

// Deprecated: Use TaskAssignmentRecord.ProtoReflect.Descriptor instead.
func (*TaskAssignmentRecord) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{33}
return file_worker_proto_rawDescGZIP(), []int{30}
}

func (x *TaskAssignmentRecord) GetWorkerId() string {
@@ -3107,7 +2948,7 @@ type TaskCreationMetrics struct {

func (x *TaskCreationMetrics) Reset() {
*x = TaskCreationMetrics{}
mi := &file_worker_proto_msgTypes[34]
mi := &file_worker_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3119,7 +2960,7 @@ func (x *TaskCreationMetrics) String() string {
func (*TaskCreationMetrics) ProtoMessage() {}

func (x *TaskCreationMetrics) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[34]
mi := &file_worker_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3132,7 +2973,7 @@ func (x *TaskCreationMetrics) ProtoReflect() protoreflect.Message {

// Deprecated: Use TaskCreationMetrics.ProtoReflect.Descriptor instead.
func (*TaskCreationMetrics) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{34}
return file_worker_proto_rawDescGZIP(), []int{31}
}

func (x *TaskCreationMetrics) GetTriggerMetric() string {
@@ -3189,7 +3030,7 @@ type VolumeHealthMetrics struct {

func (x *VolumeHealthMetrics) Reset() {
*x = VolumeHealthMetrics{}
mi := &file_worker_proto_msgTypes[35]
mi := &file_worker_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3201,7 +3042,7 @@ func (x *VolumeHealthMetrics) String() string {
func (*VolumeHealthMetrics) ProtoMessage() {}

func (x *VolumeHealthMetrics) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[35]
mi := &file_worker_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3214,7 +3055,7 @@ func (x *VolumeHealthMetrics) ProtoReflect() protoreflect.Message {

// Deprecated: Use VolumeHealthMetrics.ProtoReflect.Descriptor instead.
func (*VolumeHealthMetrics) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{35}
return file_worker_proto_rawDescGZIP(), []int{32}
}

func (x *VolumeHealthMetrics) GetTotalSize() uint64 {
@@ -3299,7 +3140,7 @@ type TaskStateFile struct {

func (x *TaskStateFile) Reset() {
*x = TaskStateFile{}
mi := &file_worker_proto_msgTypes[36]
mi := &file_worker_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3311,7 +3152,7 @@ func (x *TaskStateFile) String() string {
func (*TaskStateFile) ProtoMessage() {}

func (x *TaskStateFile) ProtoReflect() protoreflect.Message {
mi := &file_worker_proto_msgTypes[36]
mi := &file_worker_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3324,7 +3165,7 @@ func (x *TaskStateFile) ProtoReflect() protoreflect.Message {

// Deprecated: Use TaskStateFile.ProtoReflect.Descriptor instead.
func (*TaskStateFile) Descriptor() ([]byte, []int) {
return file_worker_proto_rawDescGZIP(), []int{36}
return file_worker_proto_rawDescGZIP(), []int{33}
}

func (x *TaskStateFile) GetTask() *MaintenanceTaskData {
@@ -3590,21 +3431,12 @@ const file_worker_proto_rawDesc = "" +
"\x16check_interval_seconds\x18\x04 \x01(\x05R\x14checkIntervalSeconds\x12X\n" +
"\x15erasure_coding_config\x18\x05 \x01(\v2\".worker_pb.ErasureCodingTaskConfigH\x00R\x13erasureCodingConfig\x12I\n" +
"\x10ec_vacuum_config\x18\x06 \x01(\v2\x1d.worker_pb.EcVacuumTaskConfigH\x00R\x0eecVacuumConfigB\r\n" +
"\vtask_config\"\xa2\x01\n" +
"\x10VacuumTaskConfig\x12+\n" +
"\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12/\n" +
"\x14min_volume_age_hours\x18\x02 \x01(\x05R\x11minVolumeAgeHours\x120\n" +
"\x14min_interval_seconds\x18\x03 \x01(\x05R\x12minIntervalSeconds\"\xc6\x01\n" +
"\vtask_config\"\xc6\x01\n" +
"\x17ErasureCodingTaskConfig\x12%\n" +
"\x0efullness_ratio\x18\x01 \x01(\x01R\rfullnessRatio\x12*\n" +
"\x11quiet_for_seconds\x18\x02 \x01(\x05R\x0fquietForSeconds\x12+\n" +
"\x12min_volume_size_mb\x18\x03 \x01(\x05R\x0fminVolumeSizeMb\x12+\n" +
"\x11collection_filter\x18\x04 \x01(\tR\x10collectionFilter\"n\n" +
"\x11BalanceTaskConfig\x12/\n" +
"\x13imbalance_threshold\x18\x01 \x01(\x01R\x12imbalanceThreshold\x12(\n" +
"\x10min_server_count\x18\x02 \x01(\x05R\x0eminServerCount\"I\n" +
"\x15ReplicationTaskConfig\x120\n" +
"\x14target_replica_count\x18\x01 \x01(\x05R\x12targetReplicaCount\"\xc5\x01\n" +
"\x11collection_filter\x18\x04 \x01(\tR\x10collectionFilter\"\xc5\x01\n" +
"\x12EcVacuumTaskConfig\x12-\n" +
"\x12deletion_threshold\x18\x01 \x01(\x01R\x11deletionThreshold\x123\n" +
"\x16min_volume_age_seconds\x18\x02 \x01(\x05R\x13minVolumeAgeSeconds\x12+\n" +
@@ -3698,7 +3530,7 @@ func file_worker_proto_rawDescGZIP() []byte {
return file_worker_proto_rawDescData
}

var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 46)
var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 43)
var file_worker_proto_goTypes = []any{
(*WorkerMessage)(nil), // 0: worker_pb.WorkerMessage
(*AdminMessage)(nil), // 1: worker_pb.AdminMessage
@@ -3727,25 +3559,22 @@ var file_worker_proto_goTypes = []any{
(*MaintenanceConfig)(nil), // 24: worker_pb.MaintenanceConfig
(*MaintenancePolicy)(nil), // 25: worker_pb.MaintenancePolicy
(*TaskPolicy)(nil), // 26: worker_pb.TaskPolicy
(*VacuumTaskConfig)(nil), // 27: worker_pb.VacuumTaskConfig
(*ErasureCodingTaskConfig)(nil), // 28: worker_pb.ErasureCodingTaskConfig
(*BalanceTaskConfig)(nil), // 29: worker_pb.BalanceTaskConfig
(*ReplicationTaskConfig)(nil), // 30: worker_pb.ReplicationTaskConfig
(*EcVacuumTaskConfig)(nil), // 31: worker_pb.EcVacuumTaskConfig
(*MaintenanceTaskData)(nil), // 32: worker_pb.MaintenanceTaskData
(*TaskAssignmentRecord)(nil), // 33: worker_pb.TaskAssignmentRecord
(*TaskCreationMetrics)(nil), // 34: worker_pb.TaskCreationMetrics
(*VolumeHealthMetrics)(nil), // 35: worker_pb.VolumeHealthMetrics
(*TaskStateFile)(nil), // 36: worker_pb.TaskStateFile
nil, // 37: worker_pb.WorkerRegistration.MetadataEntry
nil, // 38: worker_pb.TaskAssignment.MetadataEntry
nil, // 39: worker_pb.TaskUpdate.MetadataEntry
nil, // 40: worker_pb.TaskComplete.ResultMetadataEntry
nil, // 41: worker_pb.TaskLogMetadata.CustomDataEntry
nil, // 42: worker_pb.TaskLogEntry.FieldsEntry
nil, // 43: worker_pb.MaintenancePolicy.TaskPoliciesEntry
nil, // 44: worker_pb.MaintenanceTaskData.TagsEntry
nil, // 45: worker_pb.TaskCreationMetrics.AdditionalDataEntry
(*ErasureCodingTaskConfig)(nil), // 27: worker_pb.ErasureCodingTaskConfig
(*EcVacuumTaskConfig)(nil), // 28: worker_pb.EcVacuumTaskConfig
(*MaintenanceTaskData)(nil), // 29: worker_pb.MaintenanceTaskData
(*TaskAssignmentRecord)(nil), // 30: worker_pb.TaskAssignmentRecord
(*TaskCreationMetrics)(nil), // 31: worker_pb.TaskCreationMetrics
(*VolumeHealthMetrics)(nil), // 32: worker_pb.VolumeHealthMetrics
(*TaskStateFile)(nil), // 33: worker_pb.TaskStateFile
nil, // 34: worker_pb.WorkerRegistration.MetadataEntry
nil, // 35: worker_pb.TaskAssignment.MetadataEntry
nil, // 36: worker_pb.TaskUpdate.MetadataEntry
nil, // 37: worker_pb.TaskComplete.ResultMetadataEntry
nil, // 38: worker_pb.TaskLogMetadata.CustomDataEntry
nil, // 39: worker_pb.TaskLogEntry.FieldsEntry
nil, // 40: worker_pb.MaintenancePolicy.TaskPoliciesEntry
nil, // 41: worker_pb.MaintenanceTaskData.TagsEntry
nil, // 42: worker_pb.TaskCreationMetrics.AdditionalDataEntry
}
var file_worker_proto_depIdxs = []int32{
2, // 0: worker_pb.WorkerMessage.registration:type_name -> worker_pb.WorkerRegistration
@@ -3761,32 +3590,32 @@ var file_worker_proto_depIdxs = []int32{
17, // 10: worker_pb.AdminMessage.task_cancellation:type_name -> worker_pb.TaskCancellation
19, // 11: worker_pb.AdminMessage.admin_shutdown:type_name -> worker_pb.AdminShutdown
20, // 12: worker_pb.AdminMessage.task_log_request:type_name -> worker_pb.TaskLogRequest
37, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
34, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry
8, // 14: worker_pb.TaskAssignment.params:type_name -> worker_pb.TaskParams
38, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry
35, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry
11, // 16: worker_pb.TaskParams.sources:type_name -> worker_pb.TaskSource
12, // 17: worker_pb.TaskParams.targets:type_name -> worker_pb.TaskTarget
9, // 18: worker_pb.TaskParams.vacuum_params:type_name -> worker_pb.VacuumTaskParams
10, // 19: worker_pb.TaskParams.erasure_coding_params:type_name -> worker_pb.ErasureCodingTaskParams
13, // 20: worker_pb.TaskParams.balance_params:type_name -> worker_pb.BalanceTaskParams
14, // 21: worker_pb.TaskParams.replication_params:type_name -> worker_pb.ReplicationTaskParams
39, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry
40, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry
36, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry
37, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry
22, // 24: worker_pb.TaskLogResponse.metadata:type_name -> worker_pb.TaskLogMetadata
23, // 25: worker_pb.TaskLogResponse.log_entries:type_name -> worker_pb.TaskLogEntry
41, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry
42, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry
38, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry
39, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry
25, // 28: worker_pb.MaintenanceConfig.policy:type_name -> worker_pb.MaintenancePolicy
43, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry
28, // 30: worker_pb.TaskPolicy.erasure_coding_config:type_name -> worker_pb.ErasureCodingTaskConfig
31, // 31: worker_pb.TaskPolicy.ec_vacuum_config:type_name -> worker_pb.EcVacuumTaskConfig
40, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry
27, // 30: worker_pb.TaskPolicy.erasure_coding_config:type_name -> worker_pb.ErasureCodingTaskConfig
28, // 31: worker_pb.TaskPolicy.ec_vacuum_config:type_name -> worker_pb.EcVacuumTaskConfig
8, // 32: worker_pb.MaintenanceTaskData.typed_params:type_name -> worker_pb.TaskParams
33, // 33: worker_pb.MaintenanceTaskData.assignment_history:type_name -> worker_pb.TaskAssignmentRecord
44, // 34: worker_pb.MaintenanceTaskData.tags:type_name -> worker_pb.MaintenanceTaskData.TagsEntry
34, // 35: worker_pb.MaintenanceTaskData.creation_metrics:type_name -> worker_pb.TaskCreationMetrics
35, // 36: worker_pb.TaskCreationMetrics.volume_metrics:type_name -> worker_pb.VolumeHealthMetrics
45, // 37: worker_pb.TaskCreationMetrics.additional_data:type_name -> worker_pb.TaskCreationMetrics.AdditionalDataEntry
32, // 38: worker_pb.TaskStateFile.task:type_name -> worker_pb.MaintenanceTaskData
30, // 33: worker_pb.MaintenanceTaskData.assignment_history:type_name -> worker_pb.TaskAssignmentRecord
41, // 34: worker_pb.MaintenanceTaskData.tags:type_name -> worker_pb.MaintenanceTaskData.TagsEntry
31, // 35: worker_pb.MaintenanceTaskData.creation_metrics:type_name -> worker_pb.TaskCreationMetrics
32, // 36: worker_pb.TaskCreationMetrics.volume_metrics:type_name -> worker_pb.VolumeHealthMetrics
42, // 37: worker_pb.TaskCreationMetrics.additional_data:type_name -> worker_pb.TaskCreationMetrics.AdditionalDataEntry
29, // 38: worker_pb.TaskStateFile.task:type_name -> worker_pb.MaintenanceTaskData
26, // 39: worker_pb.MaintenancePolicy.TaskPoliciesEntry.value:type_name -> worker_pb.TaskPolicy
0, // 40: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage
1, // 41: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage
@@ -3835,7 +3664,7 @@ func file_worker_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc)),
NumEnums: 0,
NumMessages: 46,
NumMessages: 43,
NumExtensions: 0,
NumServices: 1,
},
@@ -454,8 +454,8 @@ func (vs *VolumeServer) ReceiveFile(stream volume_server_pb.VolumeServer_Receive
case *volume_server_pb.ReceiveFileRequest_Info:
// First message contains file info
fileInfo = data.Info
glog.V(1).Infof("ReceiveFile: volume %d, ext %s, collection %s, shard %d, size %d",
fileInfo.VolumeId, fileInfo.Ext, fileInfo.Collection, fileInfo.ShardId, fileInfo.FileSize)
glog.V(1).Infof("ReceiveFile: volume %d, ext %s, collection %s, shard %d, size %d, generation %d",
fileInfo.VolumeId, fileInfo.Ext, fileInfo.Collection, fileInfo.ShardId, fileInfo.FileSize, fileInfo.Generation)

// Create file path based on file info
if fileInfo.IsEcVolume {
@@ -477,9 +477,24 @@ func (vs *VolumeServer) ReceiveFile(stream volume_server_pb.VolumeServer_Receive
})
}

// Create EC shard file path
baseFileName := erasure_coding.EcShardBaseFileName(fileInfo.Collection, int(fileInfo.VolumeId))
filePath = util.Join(targetLocation.Directory, baseFileName+fileInfo.Ext)
// Create generation-aware EC shard file path
// Use index directory for index files (.ecx, .ecj, .vif), data directory for shard files
var baseDir string
if fileInfo.Ext == ".ecx" || fileInfo.Ext == ".ecj" || fileInfo.Ext == ".vif" {
baseDir = targetLocation.IdxDirectory
} else {
baseDir = targetLocation.Directory
}

baseFileName := erasure_coding.EcShardFileNameWithGeneration(
fileInfo.Collection,
baseDir,
int(fileInfo.VolumeId),
fileInfo.Generation,
)
filePath = baseFileName + fileInfo.Ext

glog.V(1).Infof("ReceiveFile: creating generation-aware EC file %s", filePath)
} else {
// Regular volume file
v := vs.store.GetVolume(needle.VolumeId(fileInfo.VolumeId))
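The handler above delegates the actual naming to erasure_coding.EcShardFileNameWithGeneration, whose body is not part of this diff. A plausible shape of that helper is sketched below, following the "<collection>_<volumeId>_g<generation>" convention the EC vacuum task later in this commit uses for its temporary files; the gen-0 fallback and the exact suffix are assumptions, not something this diff confirms.

package example

import (
	"fmt"
	"path/filepath"
)

// Hypothetical sketch of generation-aware EC base-file naming; the real helper in
// weed/storage/erasure_coding may differ. Generation 0 keeps the legacy name so old
// files remain addressable.
func ecShardBaseNameWithGeneration(collection, dir string, volumeId int, generation uint32) string {
	base := fmt.Sprintf("%d", volumeId)
	if collection != "" {
		base = fmt.Sprintf("%s_%d", collection, volumeId)
	}
	if generation > 0 {
		base = fmt.Sprintf("%s_g%d", base, generation)
	}
	return filepath.Join(dir, base)
}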
@@ -3,8 +3,12 @@ package ec_vacuum
import (
"context"
"fmt"
"io"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"

"github.com/seaweedfs/seaweedfs/weed/operation"
@@ -13,6 +17,8 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
"github.com/seaweedfs/seaweedfs/weed/worker/types/base"
"google.golang.org/grpc"
@@ -178,14 +184,102 @@ func (t *EcVacuumTask) collectEcShardsToWorker() error {

// copyEcShardsFromVolumeServer copies EC shard files from a volume server to worker's local storage
func (t *EcVacuumTask) copyEcShardsFromVolumeServer(sourceNode pb.ServerAddress, shardIds []erasure_coding.ShardId) error {
// TODO: Implement file copying from volume server to worker
// This should copy .ec00, .ec01, etc. files and .ecj file to t.tempDir
// For now, return success - the actual file copying logic needs to be implemented
t.LogInfo("Copying EC shard files", map[string]interface{}{
t.LogInfo("Copying EC shard files from volume server", map[string]interface{}{
"from": sourceNode,
"shard_ids": shardIds,
"to_dir": t.tempDir,
})

return operation.WithVolumeServerClient(false, sourceNode, t.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// Copy each EC shard file (.ec00, .ec01, etc.)
for _, shardId := range shardIds {
ext := fmt.Sprintf(".ec%02d", shardId)
localPath := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d%s", t.collection, t.volumeID, ext))

err := t.copyFileFromVolumeServer(client, ext, localPath)
if err != nil {
return fmt.Errorf("failed to copy shard %s: %w", ext, err)
}
}

// Copy .ecj file (deletion journal) with server-specific name for proper merging
// Each server may have different deletion information that needs to be merged
serverSafeAddr := strings.ReplaceAll(string(sourceNode), ":", "_")
ecjPath := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d_%s.ecj", t.collection, t.volumeID, serverSafeAddr))
err := t.copyFileFromVolumeServer(client, ".ecj", ecjPath)
if err != nil {
// .ecj file might not exist if no deletions on this server - this is OK
t.LogInfo("No .ecj file found on server (no deletions)", map[string]interface{}{
"server": sourceNode,
"volume": t.volumeID,
})
}

// Copy .ecx file (index) - only need one copy for reconstruction
// Only copy from first server that has it
ecxPath := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d.ecx", t.collection, t.volumeID))
if _, err := os.Stat(ecxPath); os.IsNotExist(err) {
err = t.copyFileFromVolumeServer(client, ".ecx", ecxPath)
if err != nil {
t.LogInfo("No .ecx file found on this server", map[string]interface{}{
"server": sourceNode,
"volume": t.volumeID,
})
}
}

return nil
})
}

// copyFileFromVolumeServer copies a single file from volume server using streaming gRPC
func (t *EcVacuumTask) copyFileFromVolumeServer(client volume_server_pb.VolumeServerClient, ext, localPath string) error {
stream, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
VolumeId: t.volumeID,
Collection: t.collection,
Ext: ext,
StopOffset: uint64(math.MaxInt64),
IsEcVolume: true,
Generation: t.sourceGeneration, // copy from source generation
IgnoreSourceFileNotFound: true, // OK if file doesn't exist
})
if err != nil {
return fmt.Errorf("failed to initiate file copy for %s: %w", ext, err)
}

// Create local file
localFile, err := os.Create(localPath)
if err != nil {
return fmt.Errorf("failed to create local file %s: %w", localPath, err)
}
defer localFile.Close()

// Stream data and write to local file
totalBytes := int64(0)
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to receive file data for %s: %w", ext, err)
}

if len(resp.FileContent) > 0 {
written, writeErr := localFile.Write(resp.FileContent)
if writeErr != nil {
return fmt.Errorf("failed to write to local file %s: %w", localPath, writeErr)
}
totalBytes += int64(written)
}
}

t.LogInfo("Successfully copied file from volume server", map[string]interface{}{
"ext": ext,
"local_path": localPath,
"bytes": totalBytes,
})

return nil
}
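The two helpers above operate on a single source server. The collection step that drives them (collectEcShardsToWorker, whose body is truncated in this hunk) presumably walks every source server; a hedged sketch of that loop, using the t.sourceNodes map of per-server shard bits that the distribution code further down relies on:

// Sketch only: assumes t.sourceNodes maps each volume server to the ShardBits it
// currently holds, as the distribution step later in this file does.
func (t *EcVacuumTask) copyAllShardsToWorker() error {
	for sourceNode, shardBits := range t.sourceNodes {
		if shardBits.ShardIdCount() == 0 {
			continue
		}
		if err := t.copyEcShardsFromVolumeServer(sourceNode, shardBits.ShardIds()); err != nil {
			return fmt.Errorf("copy shards from %s: %w", sourceNode, err)
		}
	}
	return nil
}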
@@ -196,13 +290,177 @@ func (t *EcVacuumTask) decodeEcShardsToVolume() error {
"temp_dir": t.tempDir,
})

// TODO: Implement local EC shard decoding on worker
// This should:
// 1. Use the copied .ec00-.ec09 files in t.tempDir
// 2. Use the copied .ecj file for index information
// 3. Decode to create .dat/.idx files locally
// 4. Skip deleted entries during decoding process
// For now, return success - the actual decoding logic needs to be implemented
// Step 1: Merge .ecj files from different volume servers
err := t.mergeEcjFiles()
if err != nil {
return fmt.Errorf("failed to merge .ecj files: %w", err)
}

// Step 2: Prepare shard file names for decoding
shardFileNames := make([]string, erasure_coding.DataShardsCount)
for i := 0; i < erasure_coding.DataShardsCount; i++ {
shardFile := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d.ec%02d", t.collection, t.volumeID, i))
if _, err := os.Stat(shardFile); err != nil {
return fmt.Errorf("missing required data shard %d at %s: %w", i, shardFile, err)
}
shardFileNames[i] = shardFile
}

// Step 3: Calculate target file paths
baseFileName := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d", t.collection, t.volumeID))
datFileName := baseFileName + ".dat"
idxFileName := baseFileName + ".idx"

t.LogInfo("Decoding EC shards to normal volume files", map[string]interface{}{
"base_name": baseFileName,
"dat_file": datFileName,
"idx_file": idxFileName,
"shard_file_count": len(shardFileNames),
})

// Step 4: Calculate .dat file size from .ecx file
datFileSize, err := erasure_coding.FindDatFileSize(baseFileName, baseFileName)
if err != nil {
return fmt.Errorf("failed to find dat file size: %w", err)
}

// Step 5: Write .dat file from EC data shards (this automatically skips deleted entries)
err = erasure_coding.WriteDatFile(baseFileName, datFileSize, shardFileNames)
if err != nil {
return fmt.Errorf("failed to write dat file: %w", err)
}

// Step 6: Write .idx file from .ecx and merged .ecj files (skips deleted entries)
err = erasure_coding.WriteIdxFileFromEcIndex(baseFileName)
if err != nil {
return fmt.Errorf("failed to write idx file from ec index: %w", err)
}

t.LogInfo("Successfully decoded EC shards to normal volume", map[string]interface{}{
"dat_file": datFileName,
"idx_file": idxFileName,
"dat_size": datFileSize,
})

return nil
}

// mergeEcjFiles merges .ecj (deletion journal) files from different volume servers into a single .ecj file
// This is critical because each volume server may have partial deletion information that needs to be combined
func (t *EcVacuumTask) mergeEcjFiles() error {
t.LogInfo("Merging .ecj files from different volume servers", map[string]interface{}{
"volume_id": t.volumeID,
"temp_dir": t.tempDir,
})

// Find all .ecj files with server-specific names: collection_volumeID_serverAddress.ecj
ecjFiles := make([]string, 0)
pattern := fmt.Sprintf("%s_%d_*.ecj", t.collection, t.volumeID)
matches, err := filepath.Glob(filepath.Join(t.tempDir, pattern))
if err != nil {
return fmt.Errorf("failed to find .ecj files: %w", err)
}

for _, match := range matches {
if _, err := os.Stat(match); err == nil {
ecjFiles = append(ecjFiles, match)
}
}

// Create merged .ecj file path
mergedEcjFile := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d.ecj", t.collection, t.volumeID))

if len(ecjFiles) == 0 {
// No .ecj files found - create empty one (no deletions)
emptyFile, err := os.Create(mergedEcjFile)
if err != nil {
return fmt.Errorf("failed to create empty .ecj file: %w", err)
}
emptyFile.Close()

t.LogInfo("No .ecj files found, created empty deletion journal", map[string]interface{}{
"merged_file": mergedEcjFile,
})
return nil
}

t.LogInfo("Found .ecj files to merge", map[string]interface{}{
"ecj_files": ecjFiles,
"count": len(ecjFiles),
"merged_file": mergedEcjFile,
})

// Merge all .ecj files into a single file
// Each .ecj file contains deleted needle IDs from a specific server
deletedNeedles := make(map[string]bool) // Track unique deleted needles

for _, ecjFile := range ecjFiles {
err := t.processEcjFile(ecjFile, deletedNeedles)
if err != nil {
t.LogWarning("Failed to process .ecj file", map[string]interface{}{
"file": ecjFile,
"error": err.Error(),
})
continue
}
}

// Write merged deletion information to new .ecj file
err = t.writeMergedEcjFile(mergedEcjFile, deletedNeedles)
if err != nil {
return fmt.Errorf("failed to write merged .ecj file: %w", err)
}

t.LogInfo("Successfully merged .ecj files", map[string]interface{}{
"source_files": len(ecjFiles),
"deleted_needles": len(deletedNeedles),
"merged_file": mergedEcjFile,
})

return nil
}

// processEcjFile reads a .ecj file and adds deleted needle IDs to the set
func (t *EcVacuumTask) processEcjFile(ecjFile string, deletedNeedles map[string]bool) error {
// TODO: Implement proper .ecj file parsing
// .ecj files contain binary data with deleted needle IDs
// For now, we'll use a placeholder implementation

file, err := os.Open(ecjFile)
if err != nil {
return fmt.Errorf("failed to open .ecj file %s: %w", ecjFile, err)
}
defer file.Close()

// Simple implementation: if file exists and has content, we assume some deletions
// Real implementation would parse the binary format to extract actual needle IDs
info, err := file.Stat()
if err == nil && info.Size() > 0 {
// For now, just log that we found a non-empty .ecj file
t.LogInfo("Found non-empty .ecj file with deletions", map[string]interface{}{
"file": ecjFile,
"size": info.Size(),
})
}

return nil
}
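processEcjFile above is still a placeholder. Under the assumption that a .ecj deletion journal is a packed sequence of fixed-width needle IDs, an actual parser might look roughly like the sketch below; the layout assumption and the switch to a typed map (instead of the placeholder's map[string]bool) are not confirmed by this commit.

// Sketch under two assumptions: the journal is a packed list of fixed-width needle
// IDs, and the merge bookkeeping uses typed IDs rather than strings.
// assumes: import stypes "github.com/seaweedfs/seaweedfs/weed/storage/types"
func parseEcjNeedleIds(ecjFile string, deleted map[stypes.NeedleId]bool) error {
	data, err := os.ReadFile(ecjFile)
	if err != nil {
		return fmt.Errorf("read .ecj file %s: %w", ecjFile, err)
	}
	for off := 0; off+stypes.NeedleIdSize <= len(data); off += stypes.NeedleIdSize {
		deleted[stypes.BytesToNeedleId(data[off:off+stypes.NeedleIdSize])] = true
	}
	return nil
}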
// writeMergedEcjFile writes the merged deletion information to a new .ecj file
func (t *EcVacuumTask) writeMergedEcjFile(mergedEcjFile string, deletedNeedles map[string]bool) error {
// TODO: Implement proper .ecj file writing
// For now, create an empty file since we don't have proper parsing yet

file, err := os.Create(mergedEcjFile)
if err != nil {
return fmt.Errorf("failed to create merged .ecj file: %w", err)
}
defer file.Close()

t.LogInfo("Created merged .ecj file", map[string]interface{}{
"file": mergedEcjFile,
"deleted_needles": len(deletedNeedles),
})

return nil
}
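writeMergedEcjFile is likewise a stub for now. Continuing the same fixed-width assumption as the parsing sketch above, writing the merged set back out could look like this:

// Companion sketch to the parser above: serializes the merged set back into a
// single journal, again assuming fixed-width needle IDs.
// assumes: import stypes "github.com/seaweedfs/seaweedfs/weed/storage/types"
func writeEcjNeedleIds(mergedEcjFile string, deleted map[stypes.NeedleId]bool) error {
	out, err := os.Create(mergedEcjFile)
	if err != nil {
		return fmt.Errorf("create merged .ecj file: %w", err)
	}
	defer out.Close()
	buf := make([]byte, stypes.NeedleIdSize)
	for needleId := range deleted {
		stypes.NeedleIdToBytes(buf, needleId)
		if _, err := out.Write(buf); err != nil {
			return fmt.Errorf("write merged .ecj file: %w", err)
		}
	}
	return out.Sync()
}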
@@ -215,13 +473,93 @@ func (t *EcVacuumTask) encodeVolumeToEcShards() error {
"temp_dir": t.tempDir,
})

// TODO: Implement local EC shard encoding on worker
// This should:
// 1. Use the decoded .dat/.idx files in t.tempDir
// 2. Generate new .ec00-.ec13 files locally with target generation
// 3. Generate new .ecx/.ecj files locally with target generation
// 4. Store all files in t.tempDir ready for distribution
// For now, return success - the actual encoding logic needs to be implemented
// Step 1: Verify cleaned volume files exist
baseFileName := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d", t.collection, t.volumeID))
datFileName := baseFileName + ".dat"
idxFileName := baseFileName + ".idx"

if _, err := os.Stat(datFileName); err != nil {
return fmt.Errorf("cleaned .dat file not found at %s: %w", datFileName, err)
}
if _, err := os.Stat(idxFileName); err != nil {
return fmt.Errorf("cleaned .idx file not found at %s: %w", idxFileName, err)
}

// Step 2: Generate new base filename with target generation
targetBaseFileName := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d_g%d", t.collection, t.volumeID, t.targetGeneration))
targetDatFileName := targetBaseFileName + ".dat"
targetIdxFileName := targetBaseFileName + ".idx"

t.LogInfo("Generating new EC shards with target generation", map[string]interface{}{
"source_base": baseFileName,
"target_base": targetBaseFileName,
"source_dat_file": datFileName,
"source_idx_file": idxFileName,
"target_dat_file": targetDatFileName,
"target_idx_file": targetIdxFileName,
})

// Step 2a: Copy cleaned volume files to generation-aware names for EC encoding
err := t.copyFile(datFileName, targetDatFileName)
if err != nil {
return fmt.Errorf("failed to copy .dat file for encoding: %w", err)
}

err = t.copyFile(idxFileName, targetIdxFileName)
if err != nil {
return fmt.Errorf("failed to copy .idx file for encoding: %w", err)
}

// Step 3: Generate EC shard files (.ec00 ~ .ec13) from cleaned .dat file
err = erasure_coding.WriteEcFiles(targetBaseFileName)
if err != nil {
return fmt.Errorf("failed to generate EC shard files: %w", err)
}

// Step 4: Generate .ecx file from cleaned .idx file (use target base name with generation)
err = erasure_coding.WriteSortedFileFromIdxToTarget(targetBaseFileName, targetBaseFileName+".ecx")
if err != nil {
return fmt.Errorf("failed to generate .ecx file: %w", err)
}

// Step 5: Create empty .ecj file for new generation (no deletions in clean volume)
newEcjFile := targetBaseFileName + ".ecj"
emptyEcjFile, err := os.Create(newEcjFile)
if err != nil {
return fmt.Errorf("failed to create new .ecj file: %w", err)
}
emptyEcjFile.Close()

// Step 6: Generate .vif file (volume info) for new generation
newVifFile := targetBaseFileName + ".vif"
volumeInfo := &volume_server_pb.VolumeInfo{
Version: uint32(needle.GetCurrentVersion()),
}
err = volume_info.SaveVolumeInfo(newVifFile, volumeInfo)
if err != nil {
t.LogWarning("Failed to create .vif file", map[string]interface{}{
"vif_file": newVifFile,
"error": err.Error(),
})
}

// Step 7: Verify all new files were created
createdFiles := make([]string, 0)
for i := 0; i < erasure_coding.TotalShardsCount; i++ {
shardFile := fmt.Sprintf("%s.ec%02d", targetBaseFileName, i)
if _, err := os.Stat(shardFile); err == nil {
createdFiles = append(createdFiles, fmt.Sprintf("ec%02d", i))
}
}

t.LogInfo("Successfully encoded volume to new EC shards", map[string]interface{}{
"target_generation": t.targetGeneration,
"shard_count": len(createdFiles),
"created_files": createdFiles,
"ecx_file": targetBaseFileName + ".ecx",
"ecj_file": newEcjFile,
"vif_file": newVifFile,
})

return nil
}
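To make the naming convention from the encode step concrete, the sketch below enumerates the files the worker expects to have produced in t.tempDir for a given target generation; the pattern is taken directly from the targetBaseFileName format string used above, and the 14-shard count is SeaweedFS's standard 10+4 EC layout.

// Illustrative helper: lists the generation-suffixed files produced by the encode
// step for one volume, mirroring the fmt.Sprintf patterns used above.
func expectedEncodedFiles(tempDir, collection string, volumeID uint32, generation uint32) []string {
	base := filepath.Join(tempDir, fmt.Sprintf("%s_%d_g%d", collection, volumeID, generation))
	files := []string{base + ".ecx", base + ".ecj", base + ".vif"}
	for i := 0; i < erasure_coding.TotalShardsCount; i++ { // 14 shards: 10 data + 4 parity
		files = append(files, fmt.Sprintf("%s.ec%02d", base, i))
	}
	return files
}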
@@ -234,33 +572,46 @@ func (t *EcVacuumTask) distributeNewEcShards() error {
"temp_dir": t.tempDir,
})

// TODO: Implement shard distribution logic
// This should:
// 1. Determine optimal placement for new EC shards across volume servers
// 2. Copy .ec00-.ec13 files from worker's t.tempDir to target volume servers
// 3. Copy .ecx/.ecj files from worker's t.tempDir to target volume servers
// 4. Mount the new shards on target volume servers with target generation
// For now, we'll distribute to the same nodes as before for simplicity
targetBaseFileName := filepath.Join(t.tempDir, fmt.Sprintf("%s_%d_g%d", t.collection, t.volumeID, t.targetGeneration))

// Step 1: Find one server with dedicated index folder for shared index files (.ecx, .vif, .ecj)
var indexServer pb.ServerAddress
for serverAddr := range t.sourceNodes {
// For now, use the first server as index server
// TODO: Check if server has dedicated index folder capability
indexServer = serverAddr
break
}

// Step 2: Distribute index files (.vif, .ecj) to index server only (shared files)
// Note: .ecx files are skipped per user guidance - they can be regenerated
if indexServer != "" {
err := t.distributeIndexFiles(indexServer, targetBaseFileName)
if err != nil {
return fmt.Errorf("failed to distribute index files to %s: %w", indexServer, err)
}
}

// Step 3: Distribute shard files (.ec00-.ec13) to appropriate volume servers
for targetNode, originalShardBits := range t.sourceNodes {
// Distribute the same shards that were originally on this target
if originalShardBits.ShardIdCount() == 0 {
continue
}

t.LogInfo("Copying new EC shards from worker to volume server", map[string]interface{}{
t.LogInfo("Distributing EC shards to volume server", map[string]interface{}{
"volume_id": t.volumeID,
"shard_ids": originalShardBits.ShardIds(),
"target_generation": t.targetGeneration,
"from_worker": t.tempDir,
"to_volume_server": targetNode,
"target_server": targetNode,
})

// TODO: Implement file copying from worker to volume server
// This should copy the appropriate .ec** files from t.tempDir to targetNode
err := t.distributeShardFiles(targetNode, originalShardBits.ShardIds(), targetBaseFileName)
if err != nil {
return fmt.Errorf("failed to distribute shards to %s: %w", targetNode, err)
}

// TODO: Mount the new shards on the target volume server
err := operation.WithVolumeServerClient(false, targetNode, t.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// Step 4: Mount the new shards on the target volume server
err = operation.WithVolumeServerClient(false, targetNode, t.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
_, mountErr := client.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{
VolumeId: t.volumeID,
Collection: t.collection,
@@ -278,6 +629,199 @@ func (t *EcVacuumTask) distributeNewEcShards() error {
}
}

t.LogInfo("Successfully distributed all new EC shards", map[string]interface{}{
"volume_id": t.volumeID,
"target_generation": t.targetGeneration,
"index_server": indexServer,
"shard_servers": len(t.sourceNodes),
})

return nil
}

// distributeIndexFiles distributes index files (.vif, .ecj) to a server with dedicated index folder
func (t *EcVacuumTask) distributeIndexFiles(indexServer pb.ServerAddress, targetBaseFileName string) error {
t.LogInfo("Distributing index files to index server", map[string]interface{}{
"index_server": indexServer,
"target_generation": t.targetGeneration,
})

// List of index files to distribute (note: .ecx files are skipped)
indexFiles := []string{
targetBaseFileName + ".vif", // Volume info file
targetBaseFileName + ".ecj", // Empty deletion journal for new generation
}

return operation.WithVolumeServerClient(false, indexServer, t.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
for _, localFile := range indexFiles {
if _, err := os.Stat(localFile); os.IsNotExist(err) {
t.LogInfo("Index file not found, skipping", map[string]interface{}{
"file": localFile,
})
continue
}

err := t.sendFileToVolumeServer(client, localFile, indexServer)
if err != nil {
return fmt.Errorf("failed to send index file %s: %w", localFile, err)
}
}
return nil
})
}

// distributeShardFiles distributes EC shard files (.ec00-.ec13) to a volume server
func (t *EcVacuumTask) distributeShardFiles(targetServer pb.ServerAddress, shardIds []erasure_coding.ShardId, targetBaseFileName string) error {
t.LogInfo("Distributing shard files to volume server", map[string]interface{}{
"target_server": targetServer,
"shard_ids": shardIds,
"target_generation": t.targetGeneration,
})

return operation.WithVolumeServerClient(false, targetServer, t.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
for _, shardId := range shardIds {
shardFile := fmt.Sprintf("%s.ec%02d", targetBaseFileName, shardId)
if _, err := os.Stat(shardFile); os.IsNotExist(err) {
return fmt.Errorf("shard file %s not found", shardFile)
}

err := t.sendFileToVolumeServer(client, shardFile, targetServer)
if err != nil {
return fmt.Errorf("failed to send shard file %s: %w", shardFile, err)
}
}
return nil
})
}

// copyFile copies a file from source to destination
func (t *EcVacuumTask) copyFile(src, dst string) error {
sourceFile, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open source file %s: %w", src, err)
}
defer sourceFile.Close()

destFile, err := os.Create(dst)
if err != nil {
return fmt.Errorf("failed to create destination file %s: %w", dst, err)
}
defer destFile.Close()

_, err = io.Copy(destFile, sourceFile)
if err != nil {
return fmt.Errorf("failed to copy from %s to %s: %w", src, dst, err)
}

return destFile.Sync()
}

// sendFileToVolumeServer sends a file from worker to volume server using ReceiveFile RPC
func (t *EcVacuumTask) sendFileToVolumeServer(client volume_server_pb.VolumeServerClient, localFile string, targetServer pb.ServerAddress) error {
t.LogInfo("Sending file to volume server", map[string]interface{}{
"local_file": localFile,
"target_server": targetServer,
"generation": t.targetGeneration,
})

// Open the local file
file, err := os.Open(localFile)
if err != nil {
return fmt.Errorf("failed to open local file %s: %w", localFile, err)
}
defer file.Close()

// Get file info
fileInfo, err := file.Stat()
if err != nil {
return fmt.Errorf("failed to get file info for %s: %w", localFile, err)
}

// Determine file extension and shard ID from local file path
ext := filepath.Ext(localFile)
var shardId uint32 = 0

// Parse shard ID from EC shard files (e.g., .ec00, .ec01, etc.)
if strings.HasPrefix(ext, ".ec") && len(ext) == 5 {
if shardIdInt, parseErr := strconv.Atoi(ext[3:]); parseErr == nil {
shardId = uint32(shardIdInt)
}
}

t.LogInfo("Streaming file to volume server", map[string]interface{}{
"file": localFile,
"ext": ext,
"shard_id": shardId,
"file_size": fileInfo.Size(),
"server": targetServer,
})

// Create streaming client
stream, err := client.ReceiveFile(context.Background())
if err != nil {
return fmt.Errorf("failed to create receive stream: %w", err)
}

// Send file info first with proper generation support
err = stream.Send(&volume_server_pb.ReceiveFileRequest{
Data: &volume_server_pb.ReceiveFileRequest_Info{
Info: &volume_server_pb.ReceiveFileInfo{
VolumeId: t.volumeID,
Ext: ext,
Collection: t.collection,
IsEcVolume: true,
ShardId: shardId,
FileSize: uint64(fileInfo.Size()),
Generation: t.targetGeneration, // Use proper generation field for file naming
},
},
})
if err != nil {
return fmt.Errorf("failed to send file info: %w", err)
}

// Send file content in chunks
buffer := make([]byte, 64*1024) // 64KB chunks
totalBytes := int64(0)
for {
n, readErr := file.Read(buffer)
if n > 0 {
err = stream.Send(&volume_server_pb.ReceiveFileRequest{
Data: &volume_server_pb.ReceiveFileRequest_FileContent{
FileContent: buffer[:n],
},
})
if err != nil {
return fmt.Errorf("failed to send file content: %w", err)
}
totalBytes += int64(n)
}
if readErr == io.EOF {
break
}
if readErr != nil {
return fmt.Errorf("failed to read file: %w", readErr)
}
}

// Close stream and get response
resp, err := stream.CloseAndRecv()
if err != nil {
return fmt.Errorf("failed to close stream: %w", err)
}

if resp.Error != "" {
return fmt.Errorf("server error: %s", resp.Error)
}

t.LogInfo("Successfully sent file to volume server", map[string]interface{}{
"local_file": localFile,
"target_server": targetServer,
"bytes_written": resp.BytesWritten,
"bytes_expected": totalBytes,
"generation": t.targetGeneration,
})

return nil
}