mirror of https://github.com/seaweedfs/seaweedfs.git
Admin UI: Fetch task logs (#7114)
* show task details
* loading tasks
* task UI works
* generic rendering
* rendering the export link
* removing placementConflicts from task parameters
* remove TaskSourceLocation
* remove "Server ID" column
* rendering balance task source
* sources and targets
* fix ec task generation
* move info
* render timeline
* simplified worker id
* simplify
* read task logs from worker
* isValidTaskID
* address comments
* Update weed/worker/tasks/balance/execution.go
  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update weed/worker/tasks/erasure_coding/ec_task.go
  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update weed/worker/tasks/task_log_handler.go
  Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix shard ids
* plan distributing shard id
* rendering planned shards in task details
* remove Conflicts
* worker logs correctly
* pass in dc and rack
* task logging
* Update weed/admin/maintenance/maintenance_queue.go
  Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
* display log details
* logs have fields now
* sort field keys
* fix link
* fix collection filtering
* avoid hard coded ec shard counts

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
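The "read task logs from worker", "logs have fields now", and "sort field keys" items above describe structured task log entries whose key/value fields are rendered in a stable order by the admin UI. Below is a minimal Go sketch of that idea; the `logEntry` type and `sortedFieldKeys` helper are hypothetical and not taken from the SeaweedFS code.

```go
package main

import (
	"fmt"
	"sort"
)

// logEntry is a hypothetical stand-in for a structured task log record:
// a message plus arbitrary key/value fields attached by the worker.
type logEntry struct {
	Message string
	Fields  map[string]string
}

// sortedFieldKeys returns the field names in deterministic order so the
// UI renders the same field layout on every refresh.
func sortedFieldKeys(e logEntry) []string {
	keys := make([]string, 0, len(e.Fields))
	for k := range e.Fields {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	e := logEntry{
		Message: "ec shard copy finished",
		Fields:  map[string]string{"volume_id": "123", "shard_id": "7", "server": "10.0.0.5:8080"},
	}
	for _, k := range sortedFieldKeys(e) {
		fmt.Printf("%s=%s ", k, e.Fields[k])
	}
	fmt.Println("|", e.Message)
}
```

Sorting the keys keeps the rendered field order identical across refreshes, independent of Go's randomized map iteration order.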
@@ -96,13 +96,12 @@ type ActiveTopology struct {
 
 // DestinationPlan represents a planned destination for a volume/shard operation
 type DestinationPlan struct {
-    TargetNode     string   `json:"target_node"`
-    TargetDisk     uint32   `json:"target_disk"`
-    TargetRack     string   `json:"target_rack"`
-    TargetDC       string   `json:"target_dc"`
-    ExpectedSize   uint64   `json:"expected_size"`
-    PlacementScore float64  `json:"placement_score"`
-    Conflicts      []string `json:"conflicts"`
+    TargetNode     string  `json:"target_node"`
+    TargetDisk     uint32  `json:"target_disk"`
+    TargetRack     string  `json:"target_rack"`
+    TargetDC       string  `json:"target_dc"`
+    ExpectedSize   uint64  `json:"expected_size"`
+    PlacementScore float64 `json:"placement_score"`
 }
 
 // MultiDestinationPlan represents multiple planned destinations for operations like EC
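For reference, a hedged sketch of building a DestinationPlan after this change, assuming the struct definition from the hunk above is in scope; the constructor itself is illustrative and not part of the SeaweedFS codebase.

```go
// newDestinationPlan is an illustrative helper (not from the codebase): it fills
// the fields that remain after this commit; the Conflicts slice no longer exists.
func newDestinationPlan(node string, disk uint32, rack, dc string, size uint64, score float64) *DestinationPlan {
	return &DestinationPlan{
		TargetNode:     node,
		TargetDisk:     disk,
		TargetRack:     rack,
		TargetDC:       dc,
		ExpectedSize:   size,
		PlacementScore: score,
	}
}
```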
@@ -115,6 +114,8 @@ type MultiDestinationPlan struct {
 
 // VolumeReplica represents a replica location with server and disk information
 type VolumeReplica struct {
-    ServerID string `json:"server_id"`
-    DiskID   uint32 `json:"disk_id"`
+    ServerID   string `json:"server_id"`
+    DiskID     uint32 `json:"disk_id"`
+    DataCenter string `json:"data_center"`
+    Rack       string `json:"rack"`
 }
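A small hedged example of how the enriched VolumeReplica might be formatted for display (the commit message mentions "pass in dc and rack" and rendering task sources); it assumes the struct from the hunk above, and the helper is not actual admin UI code.

```go
import "fmt"

// describeReplica is an illustrative helper (not from the codebase): it formats a
// VolumeReplica, which now carries DataCenter and Rack alongside server and disk.
func describeReplica(r VolumeReplica) string {
	return fmt.Sprintf("%s (disk %d, dc=%s, rack=%s)", r.ServerID, r.DiskID, r.DataCenter, r.Rack)
}
```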
@@ -233,6 +233,8 @@ const (
 type TaskSourceSpec struct {
     ServerID      string
     DiskID        uint32
+    DataCenter    string             // Data center of the source server
+    Rack          string             // Rack of the source server
     CleanupType   SourceCleanupType  // For EC: volume replica vs existing shards
     StorageImpact *StorageSlotChange // Optional: manual override
     EstimatedSize *int64             // Optional: manual override
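A hedged sketch of how the new DataCenter and Rack fields could flow from a topology replica into a task source; it assumes the VolumeReplica and TaskSourceSpec definitions shown in this diff, and the helper is illustrative only.

```go
// sourceFromReplica is an illustrative helper (not from the codebase): it copies the
// location of a VolumeReplica, including data center and rack, into a TaskSourceSpec.
// The concrete SourceCleanupType values are not shown in this diff, so the caller
// supplies one; StorageImpact and EstimatedSize stay nil (optional overrides).
func sourceFromReplica(r VolumeReplica, cleanup SourceCleanupType) TaskSourceSpec {
	return TaskSourceSpec{
		ServerID:    r.ServerID,
		DiskID:      r.DiskID,
		DataCenter:  r.DataCenter,
		Rack:        r.Rack,
		CleanupType: cleanup,
	}
}
```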
@@ -255,10 +257,3 @@ type TaskSpec struct {
     Sources      []TaskSourceSpec      // Can be single or multiple
     Destinations []TaskDestinationSpec // Can be single or multiple
 }
-
-// TaskSourceLocation represents a source location for task creation (DEPRECATED: use TaskSourceSpec)
-type TaskSourceLocation struct {
-    ServerID    string
-    DiskID      uint32
-    CleanupType SourceCleanupType // What type of cleanup is needed
-}
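Since TaskSourceLocation is deleted here, callers that built it would move to TaskSourceSpec. A hedged migration sketch, assuming the TaskSourceSpec definition from the earlier hunk; the helper is illustrative, not code from the repository.

```go
// migrateSourceLocation is an illustrative helper (not from the codebase): the three
// fields of the removed TaskSourceLocation map one-to-one onto TaskSourceSpec, and the
// caller now also supplies the data center and rack introduced by this commit.
func migrateSourceLocation(serverID string, diskID uint32, cleanup SourceCleanupType, dc, rack string) TaskSourceSpec {
	return TaskSourceSpec{
		ServerID:    serverID,
		DiskID:      diskID,
		DataCenter:  dc,
		Rack:        rack,
		CleanupType: cleanup,
	}
}
```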
@@ -188,8 +188,10 @@ func (at *ActiveTopology) GetVolumeLocations(volumeID uint32, collection string)
         // Verify collection matches (since index doesn't include collection)
         if at.volumeMatchesCollection(disk, volumeID, collection) {
             replicas = append(replicas, VolumeReplica{
-                ServerID: disk.NodeID,
-                DiskID:   disk.DiskID,
+                ServerID:   disk.NodeID,
+                DiskID:     disk.DiskID,
+                DataCenter: disk.DataCenter,
+                Rack:       disk.Rack,
             })
         }
     }
@@ -214,8 +216,10 @@ func (at *ActiveTopology) GetECShardLocations(volumeID uint32, collection string
         // Verify collection matches (since index doesn't include collection)
         if at.ecShardMatchesCollection(disk, volumeID, collection) {
             ecShards = append(ecShards, VolumeReplica{
-                ServerID: disk.NodeID,
-                DiskID:   disk.DiskID,
+                ServerID:   disk.NodeID,
+                DiskID:     disk.DiskID,
+                DataCenter: disk.DataCenter,
+                Rack:       disk.Rack,
             })
         }
     }
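With both GetVolumeLocations and GetECShardLocations now filling DataCenter and Rack on each VolumeReplica, a caller can group results by location without a second topology lookup. A hedged sketch follows, assuming these methods return []VolumeReplica as the appends above suggest; the grouping helper is illustrative only.

```go
// groupByDCAndRack is an illustrative helper (not from the codebase): it buckets
// replicas or EC shards by "datacenter/rack", e.g. for rendering sources and
// targets in the task detail view.
func groupByDCAndRack(replicas []VolumeReplica) map[string][]VolumeReplica {
	grouped := make(map[string][]VolumeReplica)
	for _, r := range replicas {
		key := r.DataCenter + "/" + r.Rack
		grouped[key] = append(grouped[key], r)
	}
	return grouped
}
```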