package erasure_coding

import (
	"fmt"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/worker/tasks/base"
	"github.com/seaweedfs/seaweedfs/weed/worker/types"
)
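
// Note: the concrete *Config type asserted below is defined elsewhere in this
// package. As a rough sketch (field names are taken from their uses in this
// file; the real definition may differ), detection and scheduling rely on:
//
//	type Config struct {
//		QuietForSeconds  int     // volume must be write-quiet this long
//		MinSizeMB        int     // minimum volume size to consider for EC
//		CollectionFilter string  // comma-separated collection allowlist ("" = all)
//		FullnessRatio    float64 // minimum fullness ratio (0.0 to 1.0)
//		MaxConcurrent    int     // cap on concurrently running EC tasks
//	}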

// Detection implements the detection logic for erasure coding tasks.
func Detection(metrics []*types.VolumeHealthMetrics, clusterInfo *types.ClusterInfo, config base.TaskConfig) ([]*types.TaskDetectionResult, error) {
	if !config.IsEnabled() {
		return nil, nil
	}

	ecConfig, ok := config.(*Config)
	if !ok {
		return nil, fmt.Errorf("erasure coding: unexpected config type %T", config)
	}

	var results []*types.TaskDetectionResult
	now := time.Now()
	quietThreshold := time.Duration(ecConfig.QuietForSeconds) * time.Second
	minSizeBytes := uint64(ecConfig.MinSizeMB) * 1024 * 1024 // Configurable minimum

	// Build the collection allowlist once, outside the volume loop, rather
	// than re-parsing the comma-separated filter for every volume
	var allowedCollections map[string]bool
	if ecConfig.CollectionFilter != "" {
		allowedCollections = make(map[string]bool)
		for _, collection := range strings.Split(ecConfig.CollectionFilter, ",") {
			allowedCollections[strings.TrimSpace(collection)] = true
		}
	}

	skippedAlreadyEC := 0
	skippedTooSmall := 0
	skippedCollectionFilter := 0
	skippedQuietTime := 0
	skippedFullness := 0
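
	// Each volume must pass every gate below, in order: not already
	// EC-encoded, at least minSizeBytes, in the collection allowlist (when
	// one is configured), and finally quiet for long enough and sufficiently full.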
	for _, metric := range metrics {
		// Skip if already an EC volume
		if metric.IsECVolume {
			skippedAlreadyEC++
			continue
		}

		// Check minimum size requirement
		if metric.Size < minSizeBytes {
			skippedTooSmall++
			continue
		}

		// Skip if a collection filter is configured and this volume's
		// collection is not in the allowed list
		if allowedCollections != nil && !allowedCollections[metric.Collection] {
			skippedCollectionFilter++
			continue
		}

		// Check quiet duration and fullness criteria
		if metric.Age >= quietThreshold && metric.FullnessRatio >= ecConfig.FullnessRatio {
			result := &types.TaskDetectionResult{
				TaskType:   types.TaskTypeErasureCoding,
				VolumeID:   metric.VolumeID,
				Server:     metric.Server,
				Collection: metric.Collection,
				Priority:   types.TaskPriorityLow, // EC is not urgent
				Reason: fmt.Sprintf("Volume meets EC criteria: quiet for %.1fs (>%ds), fullness=%.1f%% (>%.1f%%), size=%.1fMB (>%dMB)",
					metric.Age.Seconds(), ecConfig.QuietForSeconds, metric.FullnessRatio*100, ecConfig.FullnessRatio*100,
					float64(metric.Size)/(1024*1024), ecConfig.MinSizeMB),
				ScheduleAt: now,
			}
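			// With illustrative config values (QuietForSeconds=3600,
			// FullnessRatio=0.90, MinSizeMB=100) and a 95%-full 1GiB volume
			// that has been quiet for two hours, Reason reads:
			//   "Volume meets EC criteria: quiet for 7200.0s (>3600s), fullness=95.0% (>90.0%), size=1024.0MB (>100MB)"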
			results = append(results, result)
		} else {
			// Track why the volume was rejected; the summary log below
			// reports these as totals, so count every volume (one that fails
			// both criteria is counted in both buckets)
			if metric.Age < quietThreshold {
				skippedQuietTime++
			}
			if metric.FullnessRatio < ecConfig.FullnessRatio {
				skippedFullness++
			}
		}
	}

	// Log a debug summary when no tasks were created
	if len(results) == 0 && len(metrics) > 0 {
		totalVolumes := len(metrics)
		glog.V(1).Infof("EC detection: No tasks created for %d volumes (skipped: %d already EC, %d too small, %d filtered, %d not quiet, %d not full)",
			totalVolumes, skippedAlreadyEC, skippedTooSmall, skippedCollectionFilter, skippedQuietTime, skippedFullness)

		// Show details for the first few non-EC volumes
		shown := 0
		for _, metric := range metrics {
			if metric.IsECVolume {
				continue
			}
			if shown >= 3 { // Limit to 3 to avoid log spam
				break
			}
			shown++
			sizeMB := float64(metric.Size) / (1024 * 1024)
			glog.Infof("ERASURE CODING: Volume %d: size=%.1fMB (need ≥%dMB), age=%s (need ≥%s), fullness=%.1f%% (need ≥%.1f%%)",
				metric.VolumeID, sizeMB, ecConfig.MinSizeMB, metric.Age.Truncate(time.Minute), quietThreshold.Truncate(time.Minute),
				metric.FullnessRatio*100, ecConfig.FullnessRatio*100)
		}
	}

	return results, nil
}
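
// Illustrative call (a sketch: the maintenance scanner that actually invokes
// Detection lives elsewhere, and the metric values below are made up):
//
//	metrics := []*types.VolumeHealthMetrics{{
//		VolumeID:      42,
//		Server:        "10.0.0.5:8080",
//		Collection:    "pictures",
//		Size:          1 << 30,       // 1 GiB
//		Age:           2 * time.Hour, // time since last write
//		FullnessRatio: 0.95,
//	}}
//	results, err := Detection(metrics, clusterInfo, ecConfig)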

// Scheduling implements the scheduling logic for erasure coding tasks.
func Scheduling(task *types.Task, runningTasks []*types.Task, availableWorkers []*types.Worker, config base.TaskConfig) bool {
	ecConfig, ok := config.(*Config)
	if !ok {
		return false
	}

	// Check if we have available workers
	if len(availableWorkers) == 0 {
		return false
	}

	// Count running EC tasks
	runningCount := 0
	for _, runningTask := range runningTasks {
		if runningTask.Type == types.TaskTypeErasureCoding {
			runningCount++
		}
	}

	// Check concurrency limit
	if runningCount >= ecConfig.MaxConcurrent {
		return false
	}

	// Check if any worker can handle EC tasks
	for _, worker := range availableWorkers {
		for _, capability := range worker.Capabilities {
			if capability == types.TaskTypeErasureCoding {
				return true
			}
		}
	}

	return false
}
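
// Scheduling is a pure predicate over the task queue and worker pool, so a
// caller (sketch; the real dispatcher lives elsewhere) can gate on it directly:
//
//	if Scheduling(task, runningTasks, availableWorkers, ecConfig) {
//		assignToWorker(task) // hypothetical dispatch step
//	}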