seaweedfs/weed/admin/handlers/cluster_handlers.go

package handlers

import (
	"math"
	"net/http"
	"strconv"

	"github.com/gin-gonic/gin"
	"github.com/seaweedfs/seaweedfs/weed/admin/dash"
	"github.com/seaweedfs/seaweedfs/weed/admin/view/app"
	"github.com/seaweedfs/seaweedfs/weed/admin/view/layout"
)

// ClusterHandlers contains all the HTTP handlers for cluster management
type ClusterHandlers struct {
	adminServer *dash.AdminServer
}

// NewClusterHandlers creates a new instance of ClusterHandlers
func NewClusterHandlers(adminServer *dash.AdminServer) *ClusterHandlers {
	return &ClusterHandlers{
		adminServer: adminServer,
	}
}

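// A minimal sketch of how these handlers might be wired into a gin router.
// The paths below are illustrative assumptions; the actual admin server
// registers its own routes elsewhere.
//
//	h := NewClusterHandlers(adminServer)
//	r := gin.Default()
//	r.GET("/cluster/volume-servers", h.ShowClusterVolumeServers)
//	r.GET("/cluster/volumes", h.ShowClusterVolumes)
//	r.GET("/cluster/volumes/:id/:server", h.ShowVolumeDetails)
//	r.POST("/api/volumes/:id/:server/vacuum", h.VacuumVolume)
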
// ShowClusterVolumeServers renders the cluster volume servers page
func (h *ClusterHandlers) ShowClusterVolumeServers(c *gin.Context) {
	// Get cluster volume servers data
	volumeServersData, err := h.adminServer.GetClusterVolumeServers()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster volume servers: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	volumeServersData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	volumeServersComponent := app.ClusterVolumeServers(*volumeServersData)
	layoutComponent := layout.Layout(c, volumeServersComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowClusterVolumes renders the cluster volumes page
func (h *ClusterHandlers) ShowClusterVolumes(c *gin.Context) {
	// Get pagination and sorting parameters from query string
	page := 1
	if p := c.Query("page"); p != "" {
		if parsed, err := strconv.Atoi(p); err == nil && parsed > 0 {
			page = parsed
		}
	}
	pageSize := 100
	if ps := c.Query("pageSize"); ps != "" {
		if parsed, err := strconv.Atoi(ps); err == nil && parsed > 0 && parsed <= 1000 {
			pageSize = parsed
		}
	}
	sortBy := c.DefaultQuery("sortBy", "id")
	sortOrder := c.DefaultQuery("sortOrder", "asc")
	collection := c.Query("collection") // Optional collection filter

	// Get cluster volumes data
	volumesData, err := h.adminServer.GetClusterVolumes(page, pageSize, sortBy, sortOrder, collection)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster volumes: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	volumesData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	volumesComponent := app.ClusterVolumes(*volumesData)
	layoutComponent := layout.Layout(c, volumesComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

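// Example request (hypothetical host and route) exercising the pagination
// and sorting parameters parsed above; missing or out-of-range values fall
// back to the defaults (page 1, pageSize 100, capped at 1000):
//
//	GET /cluster/volumes?page=2&pageSize=50&sortBy=size&sortOrder=desc&collection=pics
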
// ShowVolumeDetails renders the volume details page
func (h *ClusterHandlers) ShowVolumeDetails(c *gin.Context) {
	volumeIDStr := c.Param("id")
	server := c.Param("server")
	if volumeIDStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"})
		return
	}
	if server == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Server is required"})
		return
	}
	volumeID, err := strconv.Atoi(volumeIDStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"})
		return
	}

	// Get volume details
	volumeDetails, err := h.adminServer.GetVolumeDetails(volumeID, server)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get volume details: " + err.Error()})
		return
	}

	// Render HTML template
	c.Header("Content-Type", "text/html")
	volumeDetailsComponent := app.VolumeDetails(*volumeDetails)
	layoutComponent := layout.Layout(c, volumeDetailsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

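// This handler reads both the volume ID and its owning server from path
// parameters, so a request might look like (route shape assumed, not
// confirmed by this file):
//
//	GET /cluster/volumes/42/10.0.0.5:8080
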
// ShowClusterCollections renders the cluster collections page
func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) {
	// Get cluster collections data
	collectionsData, err := h.adminServer.GetClusterCollections()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster collections: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	collectionsData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	collectionsComponent := app.ClusterCollections(*collectionsData)
	layoutComponent := layout.Layout(c, collectionsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowCollectionDetails renders the collection detail page
func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) {
	collectionName := c.Param("name")
	if collectionName == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Collection name is required"})
		return
	}

	// Map "default" collection to empty string for backend filtering
	actualCollectionName := collectionName
	if collectionName == "default" {
		actualCollectionName = ""
	}

	// Parse query parameters
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "25"))
	sortBy := c.DefaultQuery("sort_by", "volume_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")

	// Get collection details data (volumes and EC volumes)
	collectionDetailsData, err := h.adminServer.GetCollectionDetails(actualCollectionName, page, pageSize, sortBy, sortOrder)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get collection details: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	collectionDetailsData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	collectionDetailsComponent := app.CollectionDetails(*collectionDetailsData)
	layoutComponent := layout.Layout(c, collectionDetailsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

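// Note on the "default" mapping above: volumes created without an explicit
// collection are stored under the empty-string collection, so a request for
// the "default" collection (e.g. GET /cluster/collections/default, route
// assumed) is translated to an empty-string filter before querying the
// backend.
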
// ShowClusterEcShards handles the cluster EC shards page (individual shards view)
func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) {
	// Parse query parameters
	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "100"))
	sortBy := c.DefaultQuery("sort_by", "volume_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")
	collection := c.DefaultQuery("collection", "")

	// Get data from admin server
	data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	data.Username = username

	// Render template
	c.Header("Content-Type", "text/html")
	ecVolumesComponent := app.ClusterEcVolumes(*data)
	layoutComponent := layout.Layout(c, ecVolumesComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
}

// ShowEcVolumeDetails renders the EC volume details page
func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) {
	volumeIDStr := c.Param("id")
	if volumeIDStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"})
		return
	}
	volumeID, err := strconv.Atoi(volumeIDStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"})
		return
	}

	// Check that volumeID is within uint32 range before the conversion below;
	// on 64-bit platforms Atoi can return values larger than math.MaxUint32.
	if volumeID < 0 || int64(volumeID) > math.MaxUint32 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID out of range"})
		return
	}

	// Parse sorting parameters
	sortBy := c.DefaultQuery("sort_by", "shard_id")
	sortOrder := c.DefaultQuery("sort_order", "asc")

	// Get EC volume details
	ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	ecVolumeDetails.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	ecVolumeDetailsComponent := app.EcVolumeDetails(*ecVolumeDetails)
	layoutComponent := layout.Layout(c, ecVolumeDetailsComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowClusterMasters renders the cluster masters page
func (h *ClusterHandlers) ShowClusterMasters(c *gin.Context) {
	// Get cluster masters data
	mastersData, err := h.adminServer.GetClusterMasters()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster masters: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	mastersData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	mastersComponent := app.ClusterMasters(*mastersData)
	layoutComponent := layout.Layout(c, mastersComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowClusterFilers renders the cluster filers page
func (h *ClusterHandlers) ShowClusterFilers(c *gin.Context) {
	// Get cluster filers data
	filersData, err := h.adminServer.GetClusterFilers()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster filers: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	filersData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	filersComponent := app.ClusterFilers(*filersData)
	layoutComponent := layout.Layout(c, filersComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// ShowClusterBrokers renders the cluster message brokers page
func (h *ClusterHandlers) ShowClusterBrokers(c *gin.Context) {
	// Get cluster brokers data
	brokersData, err := h.adminServer.GetClusterBrokers()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster brokers: " + err.Error()})
		return
	}

	// Set username
	username := c.GetString("username")
	if username == "" {
		username = "admin"
	}
	brokersData.Username = username

	// Render HTML template
	c.Header("Content-Type", "text/html")
	brokersComponent := app.ClusterBrokers(*brokersData)
	layoutComponent := layout.Layout(c, brokersComponent)
	err = layoutComponent.Render(c.Request.Context(), c.Writer)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
		return
	}
}

// GetClusterTopology returns the cluster topology as JSON
func (h *ClusterHandlers) GetClusterTopology(c *gin.Context) {
	topology, err := h.adminServer.GetClusterTopology()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, topology)
}

// GetMasters returns master node information
func (h *ClusterHandlers) GetMasters(c *gin.Context) {
	// Simple master info
	c.JSON(http.StatusOK, gin.H{"masters": []gin.H{{"address": "localhost:9333"}}})
}

// GetVolumeServers returns volume server information
func (h *ClusterHandlers) GetVolumeServers(c *gin.Context) {
	topology, err := h.adminServer.GetClusterTopology()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"volume_servers": topology.VolumeServers})
}

// VacuumVolume handles volume vacuum requests via API
func (h *ClusterHandlers) VacuumVolume(c *gin.Context) {
	volumeIDStr := c.Param("id")
	server := c.Param("server")
	if volumeIDStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"})
		return
	}
	volumeID, err := strconv.Atoi(volumeIDStr)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"})
		return
	}

	// Perform vacuum operation
	err = h.adminServer.VacuumVolume(volumeID, server)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"error": "Failed to vacuum volume: " + err.Error(),
		})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"message":   "Volume vacuum started successfully",
		"volume_id": volumeID,
		"server":    server,
	})
}
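
// A hypothetical invocation of the vacuum endpoint, assuming the handler is
// registered under a POST route with :id and :server path parameters (the
// actual route and admin host are defined elsewhere in the admin server):
//
//	curl -X POST http://<admin-host>/api/volumes/42/10.0.0.5:8080/vacuum
//
// On success it returns the JSON body built above, with the message,
// volume_id, and server fields populated.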