Admin UI: include EC shard sizes in volume server info (#7071)

* show EC shards on dashboard; show max volumes in its own column

* master: collect shard size info

* master: send shard sizes via VolumeList

* change to a more efficient shard sizes slice

* include EC shard sizes in volume server info

* eliminate redundant gRPC calls

* much more efficient

* Efficient Counting: bits.OnesCount32() uses CPU-optimized instructions to count set bits in O(1)

* avoid extra volume list call

* simplify

* preserve existing shard sizes

* avoid hard-coded value

* Update weed/storage/erasure_coding/ec_volume_info.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/admin/dash/volume_management.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update ec_volume_info.go

* address comments

* avoid duplicated functions

* Update weed/admin/dash/volume_management.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* simplify

* refactoring

* fix compilation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Chris Lu 2025-08-02 02:16:49 -07:00 committed by GitHub
parent 3d4e8409a5
commit 9d013ea9b8
19 changed files with 1144 additions and 319 deletions

View File

@@ -23,6 +23,10 @@ type AdminData struct {
MessageBrokers []MessageBrokerNode `json:"message_brokers"`
DataCenters []DataCenter `json:"datacenters"`
LastUpdated time.Time `json:"last_updated"`
// EC shard totals for dashboard
TotalEcVolumes int `json:"total_ec_volumes"` // Total number of EC volumes across all servers
TotalEcShards int `json:"total_ec_shards"` // Total number of EC shards across all servers
}
// Object Store Users management structures
@@ -98,6 +102,13 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
return AdminData{}, err
}
// Get volume servers data with EC shard information
volumeServersData, err := s.GetClusterVolumeServers()
if err != nil {
glog.Errorf("Failed to get cluster volume servers: %v", err)
return AdminData{}, err
}
// Get master nodes status
masterNodes := s.getMasterNodesStatus()
@@ -122,6 +133,19 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
// Keep default value on error
}
// Calculate EC shard totals
var totalEcVolumes, totalEcShards int
ecVolumeSet := make(map[uint32]bool) // To avoid counting the same EC volume multiple times
for _, vs := range volumeServersData.VolumeServers {
totalEcShards += vs.EcShards
// Count unique EC volumes across all servers
for _, ecInfo := range vs.EcShardDetails {
ecVolumeSet[ecInfo.VolumeID] = true
}
}
totalEcVolumes = len(ecVolumeSet)
// Prepare admin data
adminData := AdminData{
Username: username,
@@ -130,11 +154,13 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
TotalSize: topology.TotalSize,
VolumeSizeLimitMB: volumeSizeLimitMB,
MasterNodes: masterNodes,
VolumeServers: topology.VolumeServers,
VolumeServers: volumeServersData.VolumeServers,
FilerNodes: filerNodes,
MessageBrokers: messageBrokers,
DataCenters: topology.DataCenters,
LastUpdated: topology.UpdatedAt,
TotalEcVolumes: totalEcVolumes,
TotalEcShards: totalEcShards,
}
return adminData, nil

View File

@@ -76,6 +76,13 @@ func (s *AdminServer) getTopologyViaGRPC(topology *ClusterTopology) error {
totalSize += int64(volInfo.Size)
totalFiles += int64(volInfo.FileCount)
}
// Sum up EC shard sizes
for _, ecShardInfo := range diskInfo.EcShardInfos {
for _, shardSize := range ecShardInfo.ShardSizes {
totalSize += shardSize
}
}
}
vs := VolumeServer{

View File

@@ -44,6 +44,22 @@ type VolumeServer struct {
DiskUsage int64 `json:"disk_usage"`
DiskCapacity int64 `json:"disk_capacity"`
LastHeartbeat time.Time `json:"last_heartbeat"`
// EC shard information
EcVolumes int `json:"ec_volumes"` // Number of EC volumes this server has shards for
EcShards int `json:"ec_shards"` // Total number of EC shards on this server
EcShardDetails []VolumeServerEcInfo `json:"ec_shard_details"` // Detailed EC shard information
}
// VolumeServerEcInfo represents EC shard information for a specific volume on a server
type VolumeServerEcInfo struct {
VolumeID uint32 `json:"volume_id"`
Collection string `json:"collection"`
ShardCount int `json:"shard_count"` // Number of shards this server has for this volume
EcIndexBits uint32 `json:"ec_index_bits"` // Bitmap of which shards this server has
ShardNumbers []int `json:"shard_numbers"` // List of actual shard numbers this server has
ShardSizes map[int]int64 `json:"shard_sizes"` // Map from shard number to size in bytes
TotalSize int64 `json:"total_size"` // Total size of all shards on this server for this volume
}
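
For illustration, a VolumeServerEcInfo for a hypothetical server holding shards 0, 2, and 5 of a single EC volume would look like this (all values invented; EcIndexBits 0b100101 marks exactly those three shards):

ecInfo := VolumeServerEcInfo{
	VolumeID:     123,      // hypothetical volume id
	Collection:   "pics",   // hypothetical collection
	ShardCount:   3,        // three bits set in EcIndexBits
	EcIndexBits:  0b100101, // shards 0, 2, and 5 are present
	ShardNumbers: []int{0, 2, 5},
	ShardSizes:   map[int]int64{0: 1000, 2: 2000, 5: 5000}, // bytes
	TotalSize:    8000, // sum of the shard sizes above
}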
// S3 Bucket management structures

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)
// GetClusterVolumes retrieves cluster volumes data with pagination, sorting, and filtering
@@ -26,6 +27,7 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
}
var volumes []VolumeWithTopology
var totalSize int64
var cachedTopologyInfo *master_pb.TopologyInfo
// Get detailed volume information via gRPC
err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
@@ -34,11 +36,15 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
return err
}
// Cache the topology info for reuse
cachedTopologyInfo = resp.TopologyInfo
if resp.TopologyInfo != nil {
for _, dc := range resp.TopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
for _, diskInfo := range node.DiskInfos {
// Process regular volumes
for _, volInfo := range diskInfo.VolumeInfos {
volume := VolumeWithTopology{
VolumeInformationMessage: volInfo,
@@ -49,6 +55,14 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
volumes = append(volumes, volume)
totalSize += int64(volInfo.Size)
}
// Process EC shards in the same loop
for _, ecShardInfo := range diskInfo.EcShardInfos {
// Add all shard sizes for this EC volume
for _, shardSize := range ecShardInfo.ShardSizes {
totalSize += shardSize
}
}
}
}
}
@@ -66,6 +80,8 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
if collection != "" {
var filteredVolumes []VolumeWithTopology
var filteredTotalSize int64
var filteredEcTotalSize int64
for _, volume := range volumes {
// Handle "default" collection filtering for empty collections
volumeCollection := volume.Collection
@@ -78,8 +94,36 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
filteredTotalSize += int64(volume.Size)
}
}
// Filter EC shard sizes by collection using already processed data
// This reuses the topology traversal done above (lines 43-71) to avoid a second pass
if cachedTopologyInfo != nil {
for _, dc := range cachedTopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
for _, diskInfo := range node.DiskInfos {
for _, ecShardInfo := range diskInfo.EcShardInfos {
// Handle "default" collection filtering for empty collections
ecCollection := ecShardInfo.Collection
if ecCollection == "" {
ecCollection = "default"
}
if ecCollection == collection {
// Add all shard sizes for this EC volume
for _, shardSize := range ecShardInfo.ShardSizes {
filteredEcTotalSize += shardSize
}
}
}
}
}
}
}
}
volumes = filteredVolumes
totalSize = filteredTotalSize
totalSize = filteredTotalSize + filteredEcTotalSize
}
// Calculate unique data center, rack, disk type, collection, and version counts from filtered volumes
@@ -370,23 +414,151 @@ func (s *AdminServer) VacuumVolume(volumeID int, server string) error {
})
}
// GetClusterVolumeServers retrieves cluster volume servers data
// GetClusterVolumeServers retrieves cluster volume servers data including EC shard information
func (s *AdminServer) GetClusterVolumeServers() (*ClusterVolumeServersData, error) {
topology, err := s.GetClusterTopology()
var volumeServerMap map[string]*VolumeServer
// Make only ONE VolumeList call and use it for both topology building AND EC shard processing
err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
if err != nil {
return err
}
// Get volume size limit from response, default to 30GB if not set
volumeSizeLimitMB := resp.VolumeSizeLimitMb
if volumeSizeLimitMB == 0 {
volumeSizeLimitMB = 30000 // default to 30000MB (30GB)
}
// Build basic topology from the VolumeList response (replaces GetClusterTopology call)
volumeServerMap = make(map[string]*VolumeServer)
if resp.TopologyInfo != nil {
// Process topology to build basic volume server info (similar to cluster_topology.go logic)
for _, dc := range resp.TopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
// Initialize volume server if not exists
if volumeServerMap[node.Id] == nil {
volumeServerMap[node.Id] = &VolumeServer{
Address: node.Id,
DataCenter: dc.Id,
Rack: rack.Id,
Volumes: 0,
DiskUsage: 0,
DiskCapacity: 0,
EcVolumes: 0,
EcShards: 0,
EcShardDetails: []VolumeServerEcInfo{},
}
}
vs := volumeServerMap[node.Id]
// Process EC shard information for this server at volume server level (not per-disk)
ecVolumeMap := make(map[uint32]*VolumeServerEcInfo)
// Temporary map to accumulate shard info across disks
ecShardAccumulator := make(map[uint32][]*master_pb.VolumeEcShardInformationMessage)
// Process disk information
for _, diskInfo := range node.DiskInfos {
vs.DiskCapacity += int64(diskInfo.MaxVolumeCount) * int64(volumeSizeLimitMB) * 1024 * 1024 // Use actual volume size limit
// Count regular volumes and calculate disk usage
for _, volInfo := range diskInfo.VolumeInfos {
vs.Volumes++
vs.DiskUsage += int64(volInfo.Size)
}
// Accumulate EC shard information across all disks for this volume server
for _, ecShardInfo := range diskInfo.EcShardInfos {
volumeId := ecShardInfo.Id
ecShardAccumulator[volumeId] = append(ecShardAccumulator[volumeId], ecShardInfo)
}
}
// Process accumulated EC shard information per volume
for volumeId, ecShardInfos := range ecShardAccumulator {
if len(ecShardInfos) == 0 {
continue
}
// Initialize EC volume info
ecInfo := &VolumeServerEcInfo{
VolumeID: volumeId,
Collection: ecShardInfos[0].Collection,
ShardCount: 0,
EcIndexBits: 0,
ShardNumbers: []int{},
ShardSizes: make(map[int]int64),
TotalSize: 0,
}
// Merge EcIndexBits from all disks and collect shard sizes
allShardSizes := make(map[erasure_coding.ShardId]int64)
for _, ecShardInfo := range ecShardInfos {
ecInfo.EcIndexBits |= ecShardInfo.EcIndexBits
// Collect shard sizes from this disk
shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
shardBits.EachSetIndex(func(shardId erasure_coding.ShardId) {
if size, found := erasure_coding.GetShardSize(ecShardInfo, shardId); found {
allShardSizes[shardId] = size
}
})
}
// Process final merged shard information
finalShardBits := erasure_coding.ShardBits(ecInfo.EcIndexBits)
finalShardBits.EachSetIndex(func(shardId erasure_coding.ShardId) {
ecInfo.ShardCount++
ecInfo.ShardNumbers = append(ecInfo.ShardNumbers, int(shardId))
vs.EcShards++
// Add shard size if available
if shardSize, exists := allShardSizes[shardId]; exists {
ecInfo.ShardSizes[int(shardId)] = shardSize
ecInfo.TotalSize += shardSize
vs.DiskUsage += shardSize // Add EC shard size to total disk usage
}
})
ecVolumeMap[volumeId] = ecInfo
}
// Convert EC volume map to slice and update volume server (after processing all disks)
for _, ecInfo := range ecVolumeMap {
vs.EcShardDetails = append(vs.EcShardDetails, *ecInfo)
vs.EcVolumes++
}
}
}
}
}
return nil
})
if err != nil {
return nil, err
}
// Convert map back to slice
var volumeServers []VolumeServer
for _, vs := range volumeServerMap {
volumeServers = append(volumeServers, *vs)
}
var totalCapacity int64
var totalVolumes int
for _, vs := range topology.VolumeServers {
for _, vs := range volumeServers {
totalCapacity += vs.DiskCapacity
totalVolumes += vs.Volumes
}
return &ClusterVolumeServersData{
VolumeServers: topology.VolumeServers,
TotalVolumeServers: len(topology.VolumeServers),
VolumeServers: volumeServers,
TotalVolumeServers: len(volumeServers),
TotalVolumes: totalVolumes,
TotalCapacity: totalCapacity,
LastUpdated: time.Now(),

View File

@@ -104,6 +104,53 @@ templ Admin(data dash.AdminData) {
</div>
</div>
<!-- Second Row for EC Shards Information -->
<div class="row mb-4">
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-secondary shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-secondary text-uppercase mb-1">
EC Volumes
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{fmt.Sprintf("%d", data.TotalEcVolumes)}
</div>
</div>
<div class="col-auto">
<i class="fas fa-layer-group fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
<div class="col-xl-3 col-md-6 mb-4">
<div class="card border-left-dark shadow h-100 py-2">
<div class="card-body">
<div class="row no-gutters align-items-center">
<div class="col mr-2">
<div class="text-xs font-weight-bold text-dark text-uppercase mb-1">
EC Shards
</div>
<div class="h5 mb-0 font-weight-bold text-gray-800">
{fmt.Sprintf("%d", data.TotalEcShards)}
</div>
</div>
<div class="col-auto">
<i class="fas fa-puzzle-piece fa-2x text-gray-300"></i>
</div>
</div>
</div>
</div>
</div>
<!-- Empty columns to balance the row -->
<div class="col-xl-3 col-md-6 mb-4"></div>
<div class="col-xl-3 col-md-6 mb-4"></div>
</div>
<!-- Master Nodes Status -->
<div class="row mb-4">
<div class="col-lg-4">
@@ -219,6 +266,7 @@ templ Admin(data dash.AdminData) {
<th>Data Center</th>
<th>Rack</th>
<th>Volumes</th>
<th>EC Shards</th>
<th>Capacity</th>
</tr>
</thead>
@@ -242,12 +290,22 @@ templ Admin(data dash.AdminData) {
</div>
</div>
</td>
<td>
if vs.EcShards > 0 {
<span class="badge bg-info text-white me-1">{fmt.Sprintf("%d", vs.EcShards)}</span>
if vs.EcVolumes > 0 {
<small class="text-muted">({fmt.Sprintf("%d vol", vs.EcVolumes)})</small>
}
} else {
<span class="text-muted">-</span>
}
</td>
<td>{formatBytes(vs.DiskUsage)} / {formatBytes(vs.DiskCapacity)}</td>
</tr>
}
if len(data.VolumeServers) == 0 {
<tr>
<td colspan="6" class="text-center text-muted py-4">
<td colspan="7" class="text-center text-muted py-4">
<i class="fas fa-info-circle me-2"></i>
No volume servers found
</td>

View File

@@ -86,324 +86,397 @@ func Admin(data dash.AdminData) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</div></div><div class=\"col-auto\"><i class=\"fas fa-cogs fa-2x text-gray-300\"></i></div></div></div></div></div></div><!-- Master Nodes Status --><div class=\"row mb-4\"><div class=\"col-lg-4\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-server me-2\"></i>Master Nodes</h6></div><div class=\"card-body\"><div class=\"table-responsive\"><table class=\"table table-bordered\" width=\"100%\" cellspacing=\"0\"><thead><tr><th>Address</th><th>Role</th></tr></thead> <tbody>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</div></div><div class=\"col-auto\"><i class=\"fas fa-cogs fa-2x text-gray-300\"></i></div></div></div></div></div></div><!-- Second Row for EC Shards Information --><div class=\"row mb-4\"><div class=\"col-xl-3 col-md-6 mb-4\"><div class=\"card border-left-secondary shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-secondary text-uppercase mb-1\">EC Volumes</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, master := range data.MasterNodes {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 128, Col: 63}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if master.IsLeader {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<span class=\"badge bg-primary\">Leader</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "<span class=\"badge bg-secondary\">Follower</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 118, Col: 75}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</tbody></table></div></div></div></div><!-- System Health --><div class=\"col-lg-8\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-chart-pie me-2\"></i>Cluster</h6></div><div class=\"card-body text-center\"><div class=\"row\"><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</div></div><div class=\"col-auto\"><i class=\"fas fa-layer-group fa-2x text-gray-300\"></i></div></div></div></div></div><div class=\"col-xl-3 col-md-6 mb-4\"><div class=\"card border-left-dark shadow h-100 py-2\"><div class=\"card-body\"><div class=\"row no-gutters align-items-center\"><div class=\"col mr-2\"><div class=\"text-xs font-weight-bold text-dark text-uppercase mb-1\">EC Shards</div><div class=\"h5 mb-0 font-weight-bold text-gray-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MasterNodes)))
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcShards))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 158, Col: 85}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 138, Col: 74}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</h5><small class=\"text-muted\">Masters</small></div></div></div><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</div></div><div class=\"col-auto\"><i class=\"fas fa-puzzle-piece fa-2x text-gray-300\"></i></div></div></div></div></div><!-- Empty columns to balance the row --><div class=\"col-xl-3 col-md-6 mb-4\"></div><div class=\"col-xl-3 col-md-6 mb-4\"></div></div><!-- Master Nodes Status --><div class=\"row mb-4\"><div class=\"col-lg-4\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-server me-2\"></i>Master Nodes</h6></div><div class=\"card-body\"><div class=\"table-responsive\"><table class=\"table table-bordered\" width=\"100%\" cellspacing=\"0\"><thead><tr><th>Address</th><th>Role</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.VolumeServers)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 166, Col: 87}
for _, master := range data.MasterNodes {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 175, Col: 63}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if master.IsLeader {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "<span class=\"badge bg-primary\">Leader</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "<span class=\"badge bg-secondary\">Follower</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</h5><small class=\"text-muted\">Volume Servers</small></div></div></div><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</tbody></table></div></div></div></div><!-- System Health --><div class=\"col-lg-8\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-chart-pie me-2\"></i>Cluster</h6></div><div class=\"card-body text-center\"><div class=\"row\"><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.FilerNodes)))
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MasterNodes)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 174, Col: 84}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 205, Col: 85}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</h5><small class=\"text-muted\">Filers</small></div></div></div><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</h5><small class=\"text-muted\">Masters</small></div></div></div><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MessageBrokers)))
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.VolumeServers)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 182, Col: 88}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 213, Col: 87}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "</h5><small class=\"text-muted\">Message Brokers</small></div></div></div></div></div></div></div></div><!-- Volume Servers --><div class=\"row\"><div class=\"col-12\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3 d-flex flex-row align-items-center justify-content-between\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-database me-2\"></i>Volume Servers</h6><div class=\"dropdown no-arrow\"><a class=\"dropdown-toggle\" href=\"#\" role=\"button\" data-bs-toggle=\"dropdown\"><i class=\"fas fa-ellipsis-v fa-sm fa-fw text-gray-400\"></i></a><div class=\"dropdown-menu dropdown-menu-right shadow animated--fade-in\"><div class=\"dropdown-header\">Actions:</div><a class=\"dropdown-item\" href=\"/volumes\">View Details</a> <a class=\"dropdown-item\" href=\"/cluster\">Topology View</a></div></div></div><div class=\"card-body\"><div class=\"table-responsive\"><table class=\"table table-hover\" width=\"100%\" cellspacing=\"0\"><thead><tr><th>ID</th><th>Address</th><th>Data Center</th><th>Rack</th><th>Volumes</th><th>Capacity</th></tr></thead> <tbody>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "</h5><small class=\"text-muted\">Volume Servers</small></div></div></div><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.FilerNodes)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 221, Col: 84}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "</h5><small class=\"text-muted\">Filers</small></div></div></div><div class=\"col-3\"><div class=\"card bg-light\"><div class=\"card-body\"><h5>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MessageBrokers)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 229, Col: 88}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "</h5><small class=\"text-muted\">Message Brokers</small></div></div></div></div></div></div></div></div><!-- Volume Servers --><div class=\"row\"><div class=\"col-12\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3 d-flex flex-row align-items-center justify-content-between\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-database me-2\"></i>Volume Servers</h6><div class=\"dropdown no-arrow\"><a class=\"dropdown-toggle\" href=\"#\" role=\"button\" data-bs-toggle=\"dropdown\"><i class=\"fas fa-ellipsis-v fa-sm fa-fw text-gray-400\"></i></a><div class=\"dropdown-menu dropdown-menu-right shadow animated--fade-in\"><div class=\"dropdown-header\">Actions:</div><a class=\"dropdown-item\" href=\"/volumes\">View Details</a> <a class=\"dropdown-item\" href=\"/cluster\">Topology View</a></div></div></div><div class=\"card-body\"><div class=\"table-responsive\"><table class=\"table table-hover\" width=\"100%\" cellspacing=\"0\"><thead><tr><th>ID</th><th>Address</th><th>Data Center</th><th>Rack</th><th>Volumes</th><th>EC Shards</th><th>Capacity</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, vs := range data.VolumeServers {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(vs.ID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 228, Col: 54}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "</td><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 templ.SafeURL
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", vs.PublicURL)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 230, Col: 124}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "\" target=\"_blank\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Address)
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(vs.ID)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 231, Col: 63}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 276, Col: 54}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, " <i class=\"fas fa-external-link-alt ms-1 text-muted\"></i></a></td><td>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "</td><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(vs.DataCenter)
var templ_7745c5c3_Var14 templ.SafeURL
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s/ui/index.html", vs.PublicURL)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 235, Col: 62}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 278, Col: 124}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</td><td>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "\" target=\"_blank\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Rack)
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Address)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 236, Col: 56}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 279, Col: 63}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</td><td><div class=\"progress\" style=\"height: 20px;\"><div class=\"progress-bar\" role=\"progressbar\" style=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " <i class=\"fas fa-external-link-alt ms-1 text-muted\"></i></a></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templruntime.SanitizeStyleAttributeValues(fmt.Sprintf("width: %d%%", calculatePercent(vs.Volumes, vs.MaxVolumes)))
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(vs.DataCenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 240, Col: 135}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 283, Col: 62}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes))
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Rack)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 241, Col: 104}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 284, Col: 56}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</div></div></td><td>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</td><td><div class=\"progress\" style=\"height: 20px;\"><div class=\"progress-bar\" role=\"progressbar\" style=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskUsage))
templ_7745c5c3_Var18, templ_7745c5c3_Err = templruntime.SanitizeStyleAttributeValues(fmt.Sprintf("width: %d%%", calculatePercent(vs.Volumes, vs.MaxVolumes)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 245, Col: 74}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 288, Col: 135}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, " / ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskCapacity))
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 245, Col: 107}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 289, Col: 104}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</td></tr>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</div></div></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
if len(data.VolumeServers) == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<tr><td colspan=\"6\" class=\"text-center text-muted py-4\"><i class=\"fas fa-info-circle me-2\"></i> No volume servers found</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</tbody></table></div></div></div></div></div><!-- Filer Nodes --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3 d-flex flex-row align-items-center justify-content-between\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-folder me-2\"></i>Filer Nodes</h6><div class=\"dropdown no-arrow\"><a class=\"dropdown-toggle\" href=\"#\" role=\"button\" data-bs-toggle=\"dropdown\"><i class=\"fas fa-ellipsis-v fa-sm fa-fw text-gray-400\"></i></a><div class=\"dropdown-menu dropdown-menu-right shadow animated--fade-in\"><div class=\"dropdown-header\">Actions:</div><a class=\"dropdown-item\" href=\"/filer\">File Browser</a> <a class=\"dropdown-item\" href=\"/cluster\">Topology View</a></div></div></div><div class=\"card-body\"><div class=\"table-responsive\"><table class=\"table table-hover\" width=\"100%\" cellspacing=\"0\"><thead><tr><th>Address</th><th>Data Center</th><th>Rack</th><th>Last Updated</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, filer := range data.FilerNodes {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<tr><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 templ.SafeURL
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s", filer.Address)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 298, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "\" target=\"_blank\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 299, Col: 66}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, " <i class=\"fas fa-external-link-alt ms-1 text-muted\"></i></a></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 65}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
if vs.EcShards > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<span class=\"badge bg-info text-white me-1\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", vs.EcShards))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 295, Col: 127}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</span> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if vs.EcVolumes > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<small class=\"text-muted\">(")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d vol", vs.EcVolumes))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 297, Col: 119}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, ")</small>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
} else {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "<span class=\"text-muted\">-</span>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack)
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskUsage))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 304, Col: 59}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 74}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " / ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskCapacity))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 107}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(filer.LastUpdated.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 305, Col: 96}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
if len(data.FilerNodes) == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<tr><td colspan=\"4\" class=\"text-center text-muted py-4\"><i class=\"fas fa-info-circle me-2\"></i> No filer nodes found</td></tr>")
if len(data.VolumeServers) == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "<tr><td colspan=\"7\" class=\"text-center text-muted py-4\"><i class=\"fas fa-info-circle me-2\"></i> No volume servers found</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</tbody></table></div></div></div></div></div><!-- Last Updated --><div class=\"row\"><div class=\"col-12\"><small class=\"text-muted\"><i class=\"fas fa-clock me-1\"></i> Last updated: ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "</tbody></table></div></div></div></div></div><!-- Filer Nodes --><div class=\"row mb-4\"><div class=\"col-12\"><div class=\"card shadow mb-4\"><div class=\"card-header py-3 d-flex flex-row align-items-center justify-content-between\"><h6 class=\"m-0 font-weight-bold text-primary\"><i class=\"fas fa-folder me-2\"></i>Filer Nodes</h6><div class=\"dropdown no-arrow\"><a class=\"dropdown-toggle\" href=\"#\" role=\"button\" data-bs-toggle=\"dropdown\"><i class=\"fas fa-ellipsis-v fa-sm fa-fw text-gray-400\"></i></a><div class=\"dropdown-menu dropdown-menu-right shadow animated--fade-in\"><div class=\"dropdown-header\">Actions:</div><a class=\"dropdown-item\" href=\"/filer\">File Browser</a> <a class=\"dropdown-item\" href=\"/cluster\">Topology View</a></div></div></div><div class=\"card-body\"><div class=\"table-responsive\"><table class=\"table table-hover\" width=\"100%\" cellspacing=\"0\"><thead><tr><th>Address</th><th>Data Center</th><th>Rack</th><th>Last Updated</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 329, Col: 81}
for _, filer := range data.FilerNodes {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "<tr><td><a href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 templ.SafeURL
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinURLErrs(templ.SafeURL(fmt.Sprintf("http://%s", filer.Address)))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 356, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\" target=\"_blank\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 357, Col: 66}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " <i class=\"fas fa-external-link-alt ms-1 text-muted\"></i></a></td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 361, Col: 65}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 362, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(filer.LastUpdated.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 363, Col: 96}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if len(data.FilerNodes) == 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<tr><td colspan=\"4\" class=\"text-center text-muted py-4\"><i class=\"fas fa-info-circle me-2\"></i> No filer nodes found</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "</tbody></table></div></div></div></div></div><!-- Last Updated --><div class=\"row\"><div class=\"col-12\"><small class=\"text-muted\"><i class=\"fas fa-clock me-1\"></i> Last updated: ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "</small></div></div></div>")
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05"))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 387, Col: 81}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "</small></div></div></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

View File

@@ -103,6 +103,8 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) {
<th>Data Center</th>
<th>Rack</th>
<th>Volumes</th>
<th>Max Volumes</th>
<th>EC Shards</th>
<th>Capacity</th>
<th>Usage</th>
<th>Actions</th>
@@ -133,9 +135,28 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) {
style={fmt.Sprintf("width: %d%%", calculatePercent(host.Volumes, host.MaxVolumes))}>
</div>
</div>
<small>{fmt.Sprintf("%d/%d", host.Volumes, host.MaxVolumes)}</small>
<span class="badge bg-primary">{fmt.Sprintf("%d", host.Volumes)}</span>
</div>
</td>
<td>
<span class="badge bg-secondary">{fmt.Sprintf("%d", host.MaxVolumes)}</span>
</td>
<td>
if host.EcShards > 0 {
<div class="d-flex align-items-center">
<i class="fas fa-layer-group me-1 text-info"></i>
<span class="badge bg-info text-white me-1">{fmt.Sprintf("%d", host.EcShards)}</span>
<small class="text-muted">shards</small>
</div>
if host.EcVolumes > 0 {
<div class="mt-1">
<small class="text-muted">{fmt.Sprintf("%d EC volumes", host.EcVolumes)}</small>
</div>
}
} else {
<span class="text-muted">-</span>
}
</td>
<td>{formatBytes(host.DiskCapacity)}</td>
<td>
<div class="d-flex align-items-center">
@@ -161,6 +182,8 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) {
data-max-volumes={fmt.Sprintf("%d", host.MaxVolumes)}
data-disk-usage={fmt.Sprintf("%d", host.DiskUsage)}
data-disk-capacity={fmt.Sprintf("%d", host.DiskCapacity)}
data-ec-volumes={fmt.Sprintf("%d", host.EcVolumes)}
data-ec-shards={fmt.Sprintf("%d", host.EcShards)}
data-last-heartbeat={host.LastHeartbeat.Format("2006-01-02 15:04:05")}>
<i class="fas fa-eye"></i>
</button>
@@ -213,6 +236,8 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) {
maxVolumes: parseInt(button.getAttribute('data-max-volumes')),
diskUsage: parseInt(button.getAttribute('data-disk-usage')),
diskCapacity: parseInt(button.getAttribute('data-disk-capacity')),
ecVolumes: parseInt(button.getAttribute('data-ec-volumes')),
ecShards: parseInt(button.getAttribute('data-ec-shards')),
lastHeartbeat: button.getAttribute('data-last-heartbeat')
};
showVolumeServerDetails(serverData);
@@ -268,6 +293,19 @@ templ ClusterVolumeServers(data dash.ClusterVolumeServersData) {
'</table>' +
'</div>' +
'</div>' +
// Add EC Shard information if available
(server.ecShards > 0 ?
'<div class="row mt-3">' +
'<div class="col-12">' +
'<h6 class="text-primary"><i class="fas fa-layer-group me-1"></i>Erasure Coding Information</h6>' +
'<table class="table table-sm">' +
'<tr><td><strong>EC Volumes:</strong></td><td><span class="badge bg-info text-white">' + server.ecVolumes + '</span></td></tr>' +
'<tr><td><strong>EC Shards:</strong></td><td><span class="badge bg-info text-white">' + server.ecShards + '</span></td></tr>' +
'</table>' +
'</div>' +
'</div>' : '') +
'<div class="row mt-3">' +
'<div class="col-12">' +
'<h6 class="text-primary"><i class="fas fa-link me-1"></i>Quick Actions</h6>' +

File diff suppressed because one or more lines are too long

View File

@@ -129,6 +129,7 @@ message VolumeEcShardInformationMessage {
string disk_type = 4;
uint64 expire_at_sec = 5; // used to record the destruction time of ec volume
uint32 disk_id = 6;
repeated int64 shard_sizes = 7; // optimized: sizes for shards in order of set bits in ec_index_bits
}
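
To make the ordering concrete: if ec_index_bits is 0b100101 (shards 0, 2, and 5 present), shard_sizes has length 3, where shard_sizes[0] is the size of shard 0, shard_sizes[1] the size of shard 2, and shard_sizes[2] the size of shard 5. A shard's slice index is the count of set bits below its bit position, a rank query that bits.OnesCount32() answers in O(1), as the commit message notes.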
message StorageBackend {

View File

@@ -560,6 +560,7 @@ type VolumeEcShardInformationMessage struct {
DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
ExpireAtSec uint64 `protobuf:"varint,5,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // used to record the destruction time of ec volume
DiskId uint32 `protobuf:"varint,6,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"`
ShardSizes []int64 `protobuf:"varint,7,rep,packed,name=shard_sizes,json=shardSizes,proto3" json:"shard_sizes,omitempty"` // optimized: sizes for shards in order of set bits in ec_index_bits
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -636,6 +637,13 @@ func (x *VolumeEcShardInformationMessage) GetDiskId() uint32 {
return 0
}
func (x *VolumeEcShardInformationMessage) GetShardSizes() []int64 {
if x != nil {
return x.ShardSizes
}
return nil
}
type StorageBackend struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
@@ -4098,7 +4106,7 @@ const file_master_proto_rawDesc = "" +
"\x03ttl\x18\n" +
" \x01(\rR\x03ttl\x12\x1b\n" +
"\tdisk_type\x18\x0f \x01(\tR\bdiskType\x12\x17\n" +
"\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xcf\x01\n" +
"\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xf0\x01\n" +
"\x1fVolumeEcShardInformationMessage\x12\x0e\n" +
"\x02id\x18\x01 \x01(\rR\x02id\x12\x1e\n" +
"\n" +
@@ -4107,7 +4115,9 @@ const file_master_proto_rawDesc = "" +
"\rec_index_bits\x18\x03 \x01(\rR\vecIndexBits\x12\x1b\n" +
"\tdisk_type\x18\x04 \x01(\tR\bdiskType\x12\"\n" +
"\rexpire_at_sec\x18\x05 \x01(\x04R\vexpireAtSec\x12\x17\n" +
"\adisk_id\x18\x06 \x01(\rR\x06diskId\"\xbe\x01\n" +
"\adisk_id\x18\x06 \x01(\rR\x06diskId\x12\x1f\n" +
"\vshard_sizes\x18\a \x03(\x03R\n" +
"shardSizes\"\xbe\x01\n" +
"\x0eStorageBackend\x12\x12\n" +
"\x04type\x18\x01 \x01(\tR\x04type\x12\x0e\n" +
"\x02id\x18\x02 \x01(\tR\x02id\x12I\n" +

View File

@@ -13,6 +13,7 @@ import (
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
"io"
)
@@ -248,8 +249,27 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
if destroyTime > 0 {
expireAtString = fmt.Sprintf("expireAt:%s", time.Unix(int64(destroyTime), 0).Format("2006-01-02 15:04:05"))
}
output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v %s\n",
ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds(), expireAtString)
// Build shard size information
shardIds := erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds()
var totalSize int64
var shardSizeInfo string
if len(ecShardInfo.ShardSizes) > 0 {
var shardDetails []string
for _, shardId := range shardIds {
if size, found := erasure_coding.GetShardSize(ecShardInfo, erasure_coding.ShardId(shardId)); found {
shardDetails = append(shardDetails, fmt.Sprintf("%d:%s", shardId, util.BytesToHumanReadable(uint64(size))))
totalSize += size
} else {
shardDetails = append(shardDetails, fmt.Sprintf("%d:?", shardId))
}
}
shardSizeInfo = fmt.Sprintf(" sizes:[%s] total:%s", strings.Join(shardDetails, " "), util.BytesToHumanReadable(uint64(totalSize)))
}
output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v%s %s\n",
ecShardInfo.Id, ecShardInfo.Collection, shardIds, shardSizeInfo, expireAtString)
}
output((volumeInfosFound || ecShardInfoFound) && verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)
return s
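At verbosity level 5 the listing now carries per-shard sizes; an illustrative line follows (the sizes are made up, and the exact size strings depend on util.BytesToHumanReadable):

    ec volume id:123 collection:pics shards:[0 2 5] sizes:[0:1.0MB 2:1.0MB 5:512KB] total:2.5MB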

View File

@ -18,6 +18,7 @@ const (
DataShardsCount = 10
ParityShardsCount = 4
TotalShardsCount = DataShardsCount + ParityShardsCount
MinTotalDisks = TotalShardsCount/ParityShardsCount + 1
ErasureCodingLargeBlockSize = 1024 * 1024 * 1024 // 1GB
ErasureCodingSmallBlockSize = 1024 * 1024 // 1MB
)
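With the default 10+4 layout this works out, under integer division, to 14/4 + 1 = 3 + 1 = 4, matching the hard-coded minimum of 4 that a later hunk in this commit removes from the EC destination planning code.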

View File

@ -0,0 +1,68 @@
package erasure_coding
import (
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// GetShardSize returns the size of a specific shard from VolumeEcShardInformationMessage
// Returns the size and true if the shard exists, 0 and false if not present
func GetShardSize(msg *master_pb.VolumeEcShardInformationMessage, shardId ShardId) (size int64, found bool) {
if msg == nil || msg.ShardSizes == nil {
return 0, false
}
shardBits := ShardBits(msg.EcIndexBits)
index, found := shardBits.ShardIdToIndex(shardId)
if !found || index >= len(msg.ShardSizes) {
return 0, false
}
return msg.ShardSizes[index], true
}
// SetShardSize sets the size of a specific shard in VolumeEcShardInformationMessage
// Returns true if successful, false if the shard is not present in EcIndexBits
func SetShardSize(msg *master_pb.VolumeEcShardInformationMessage, shardId ShardId, size int64) bool {
if msg == nil {
return false
}
shardBits := ShardBits(msg.EcIndexBits)
index, found := shardBits.ShardIdToIndex(shardId)
if !found {
return false
}
// Initialize ShardSizes slice if needed
expectedLength := shardBits.ShardIdCount()
if msg.ShardSizes == nil {
msg.ShardSizes = make([]int64, expectedLength)
} else if len(msg.ShardSizes) != expectedLength {
// Resize the slice to match the expected length
newSizes := make([]int64, expectedLength)
copy(newSizes, msg.ShardSizes)
msg.ShardSizes = newSizes
}
if index >= len(msg.ShardSizes) {
return false
}
msg.ShardSizes[index] = size
return true
}
// InitializeShardSizes initializes the ShardSizes slice based on EcIndexBits
// This ensures the slice has the correct length for all present shards
func InitializeShardSizes(msg *master_pb.VolumeEcShardInformationMessage) {
if msg == nil {
return
}
shardBits := ShardBits(msg.EcIndexBits)
expectedLength := shardBits.ShardIdCount()
if msg.ShardSizes == nil || len(msg.ShardSizes) != expectedLength {
msg.ShardSizes = make([]int64, expectedLength)
}
}
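A minimal caller-side sketch of these helpers (illustrative IDs and sizes, assuming the usual imports; the test file below exercises the same paths):

    msg := &master_pb.VolumeEcShardInformationMessage{Id: 7, EcIndexBits: 0b100101} // shards 0, 2, 5
    erasure_coding.SetShardSize(msg, 2, 2048) // stored at index 1 (one set bit below bit 2)
    if size, ok := erasure_coding.GetShardSize(msg, 2); ok {
        fmt.Println(size) // prints 2048
    }
    if _, ok := erasure_coding.GetShardSize(msg, 1); !ok {
        fmt.Println("shard 1 not present") // GetShardSize returns 0, false
    }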

View File

@ -0,0 +1,117 @@
package erasure_coding
import (
"testing"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
func TestShardSizeHelpers(t *testing.T) {
// Create a message with shards 0, 2, and 5 present (EcIndexBits = 0b100101 = 37)
msg := &master_pb.VolumeEcShardInformationMessage{
Id: 123,
EcIndexBits: 37, // Binary: 100101, shards 0, 2, 5 are present
}
// Test SetShardSize
if !SetShardSize(msg, 0, 1000) {
t.Error("Failed to set size for shard 0")
}
if !SetShardSize(msg, 2, 2000) {
t.Error("Failed to set size for shard 2")
}
if !SetShardSize(msg, 5, 5000) {
t.Error("Failed to set size for shard 5")
}
// Test setting size for non-present shard should fail
if SetShardSize(msg, 1, 1500) {
t.Error("Should not be able to set size for non-present shard 1")
}
// Verify ShardSizes slice has correct length (3 shards)
if len(msg.ShardSizes) != 3 {
t.Errorf("Expected ShardSizes length 3, got %d", len(msg.ShardSizes))
}
// Test GetShardSize
if size, found := GetShardSize(msg, 0); !found || size != 1000 {
t.Errorf("Expected shard 0 size 1000, got %d (found: %v)", size, found)
}
if size, found := GetShardSize(msg, 2); !found || size != 2000 {
t.Errorf("Expected shard 2 size 2000, got %d (found: %v)", size, found)
}
if size, found := GetShardSize(msg, 5); !found || size != 5000 {
t.Errorf("Expected shard 5 size 5000, got %d (found: %v)", size, found)
}
// Test getting size for non-present shard
if size, found := GetShardSize(msg, 1); found {
t.Errorf("Should not find shard 1, but got size %d", size)
}
// Test direct slice access
if len(msg.ShardSizes) != 3 {
t.Errorf("Expected 3 shard sizes in slice, got %d", len(msg.ShardSizes))
}
expectedSizes := []int64{1000, 2000, 5000} // Ordered by shard ID: 0, 2, 5
for i, expectedSize := range expectedSizes {
if i < len(msg.ShardSizes) && msg.ShardSizes[i] != expectedSize {
t.Errorf("Expected ShardSizes[%d] = %d, got %d", i, expectedSize, msg.ShardSizes[i])
}
}
}
func TestShardBitsHelpers(t *testing.T) {
// Test with EcIndexBits = 37 (binary: 100101, shards 0, 2, 5)
shardBits := ShardBits(37)
// Test ShardIdToIndex
if index, found := shardBits.ShardIdToIndex(0); !found || index != 0 {
t.Errorf("Expected shard 0 at index 0, got %d (found: %v)", index, found)
}
if index, found := shardBits.ShardIdToIndex(2); !found || index != 1 {
t.Errorf("Expected shard 2 at index 1, got %d (found: %v)", index, found)
}
if index, found := shardBits.ShardIdToIndex(5); !found || index != 2 {
t.Errorf("Expected shard 5 at index 2, got %d (found: %v)", index, found)
}
// Test for non-present shard
if index, found := shardBits.ShardIdToIndex(1); found {
t.Errorf("Should not find shard 1, but got index %d", index)
}
// Test IndexToShardId
if shardId, found := shardBits.IndexToShardId(0); !found || shardId != 0 {
t.Errorf("Expected index 0 to be shard 0, got %d (found: %v)", shardId, found)
}
if shardId, found := shardBits.IndexToShardId(1); !found || shardId != 2 {
t.Errorf("Expected index 1 to be shard 2, got %d (found: %v)", shardId, found)
}
if shardId, found := shardBits.IndexToShardId(2); !found || shardId != 5 {
t.Errorf("Expected index 2 to be shard 5, got %d (found: %v)", shardId, found)
}
// Test for invalid index
if shardId, found := shardBits.IndexToShardId(3); found {
t.Errorf("Should not find shard for index 3, but got shard %d", shardId)
}
// Test EachSetIndex
var collectedShards []ShardId
shardBits.EachSetIndex(func(shardId ShardId) {
collectedShards = append(collectedShards, shardId)
})
expectedShards := []ShardId{0, 2, 5}
if len(collectedShards) != len(expectedShards) {
t.Errorf("Expected EachSetIndex to collect %v, got %v", expectedShards, collectedShards)
}
for i, expected := range expectedShards {
if i >= len(collectedShards) || collectedShards[i] != expected {
t.Errorf("Expected EachSetIndex to collect %v, got %v", expectedShards, collectedShards)
break
}
}
}

View File

@ -227,6 +227,9 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage(diskId uint32) (messages [
}
prevVolumeId = s.VolumeId
m.EcIndexBits = uint32(ShardBits(m.EcIndexBits).AddShardId(s.ShardId))
// Add shard size information using the optimized format
SetShardSize(m, s.ShardId, s.Size())
}
return
}

View File

@ -1,6 +1,8 @@
package erasure_coding
import (
"math/bits"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
@ -11,27 +13,51 @@ type EcVolumeInfo struct {
Collection string
ShardBits ShardBits
DiskType string
DiskId uint32 // ID of the disk this EC volume is on
ExpireAtSec uint64 // ec volume destroy time, calculated from when the ec volume was created
}
func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits, expireAtSec uint64, diskId uint32) *EcVolumeInfo {
return &EcVolumeInfo{
Collection: collection,
VolumeId: vid,
ShardBits: shardBits,
DiskType: diskType,
DiskId: diskId,
ExpireAtSec: expireAtSec,
}
DiskId uint32 // ID of the disk this EC volume is on
ExpireAtSec uint64 // ec volume destroy time, calculated from when the ec volume was created
ShardSizes []int64 // optimized: sizes for shards in order of set bits in ShardBits
}
func (ecInfo *EcVolumeInfo) AddShardId(id ShardId) {
oldBits := ecInfo.ShardBits
ecInfo.ShardBits = ecInfo.ShardBits.AddShardId(id)
// If the shard was actually added, resize the ShardSizes slice
if oldBits != ecInfo.ShardBits {
ecInfo.resizeShardSizes(oldBits)
}
}
func (ecInfo *EcVolumeInfo) RemoveShardId(id ShardId) {
oldBits := ecInfo.ShardBits
ecInfo.ShardBits = ecInfo.ShardBits.RemoveShardId(id)
// If the shard was actually removed, resize the ShardSizes slice
if oldBits != ecInfo.ShardBits {
ecInfo.resizeShardSizes(oldBits)
}
}
func (ecInfo *EcVolumeInfo) SetShardSize(id ShardId, size int64) {
ecInfo.ensureShardSizesInitialized()
if index, found := ecInfo.ShardBits.ShardIdToIndex(id); found && index < len(ecInfo.ShardSizes) {
ecInfo.ShardSizes[index] = size
}
}
func (ecInfo *EcVolumeInfo) GetShardSize(id ShardId) (int64, bool) {
if index, found := ecInfo.ShardBits.ShardIdToIndex(id); found && index < len(ecInfo.ShardSizes) {
return ecInfo.ShardSizes[index], true
}
return 0, false
}
func (ecInfo *EcVolumeInfo) GetTotalSize() int64 {
var total int64
for _, size := range ecInfo.ShardSizes {
total += size
}
return total
}
func (ecInfo *EcVolumeInfo) HasShardId(id ShardId) bool {
@ -48,17 +74,33 @@ func (ecInfo *EcVolumeInfo) ShardIdCount() (count int) {
func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
ret := &EcVolumeInfo{
VolumeId: ecInfo.VolumeId,
Collection: ecInfo.Collection,
ShardBits: ecInfo.ShardBits.Minus(other.ShardBits),
DiskType: ecInfo.DiskType,
VolumeId: ecInfo.VolumeId,
Collection: ecInfo.Collection,
ShardBits: ecInfo.ShardBits.Minus(other.ShardBits),
DiskType: ecInfo.DiskType,
DiskId: ecInfo.DiskId,
ExpireAtSec: ecInfo.ExpireAtSec,
}
// Initialize optimized ShardSizes for the result
ret.ensureShardSizesInitialized()
// Copy shard sizes for remaining shards
retIndex := 0
for shardId := ShardId(0); shardId < TotalShardsCount && retIndex < len(ret.ShardSizes); shardId++ {
if ret.ShardBits.HasShardId(shardId) {
if size, exists := ecInfo.GetShardSize(shardId); exists {
ret.ShardSizes[retIndex] = size
}
retIndex++
}
}
return ret
}
func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.VolumeEcShardInformationMessage) {
return &master_pb.VolumeEcShardInformationMessage{
t := &master_pb.VolumeEcShardInformationMessage{
Id: uint32(ecInfo.VolumeId),
EcIndexBits: uint32(ecInfo.ShardBits),
Collection: ecInfo.Collection,
@ -66,6 +108,12 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.
ExpireAtSec: ecInfo.ExpireAtSec,
DiskId: ecInfo.DiskId,
}
// Copy the optimized ShardSizes into the message
t.ShardSizes = make([]int64, len(ecInfo.ShardSizes))
copy(t.ShardSizes, ecInfo.ShardSizes)
return t
}
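Copying into a fresh slice here, rather than handing the message a reference to ecInfo.ShardSizes, presumably keeps the protobuf message independent of the in-memory EcVolumeInfo, so a later resize of the volume's shard set cannot mutate a message that has already been handed off.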
type ShardBits uint32 // use bits to indicate the shard id, use 32 bits just for possible future extension
@ -121,3 +169,81 @@ func (b ShardBits) MinusParityShards() ShardBits {
}
return b
}
// ShardIdToIndex converts a shard ID to its index position in the ShardSizes slice
// Returns the index and true if the shard is present, -1 and false if not present
func (b ShardBits) ShardIdToIndex(shardId ShardId) (index int, found bool) {
if !b.HasShardId(shardId) {
return -1, false
}
// Create a mask for bits before the shardId
mask := uint32((1 << shardId) - 1)
// Count set bits before the shardId using efficient bit manipulation
index = bits.OnesCount32(uint32(b) & mask)
return index, true
}
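A worked trace of the lookup (same bit pattern as the tests above): resolving shard 5 in ShardBits 0b100101 masks off bit 5 and above, then popcounts what remains.

    b    = 0b100101                   // shards 0, 2, 5
    mask = (1 << 5) - 1               // 0b011111
    bits.OnesCount32(b & mask)        // popcount(0b000101) = 2, so shard 5 sits at index 2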
// EachSetIndex iterates over all set shard IDs and calls the provided function for each
// Efficient: bit manipulation visits only the bits actually set, never scanning absent shard positions
func (b ShardBits) EachSetIndex(fn func(shardId ShardId)) {
bitsValue := uint32(b)
for bitsValue != 0 {
// Find the position of the least significant set bit
shardId := ShardId(bits.TrailingZeros32(bitsValue))
fn(shardId)
// Clear the least significant set bit
bitsValue &= bitsValue - 1
}
}
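A quick trace for 0b100101 shows why only set bits are visited: each b &= b - 1 clears the lowest remaining one.

    0b100101 -> TrailingZeros32 = 0, fn(0), clear -> 0b100100
    0b100100 -> TrailingZeros32 = 2, fn(2), clear -> 0b100000
    0b100000 -> TrailingZeros32 = 5, fn(5), clear -> 0 (loop ends)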
// IndexToShardId converts an index position in the ShardSizes slice to the corresponding shard ID
// Returns the shard ID and true for a valid index, 0 and false for an invalid index
func (b ShardBits) IndexToShardId(index int) (shardId ShardId, found bool) {
if index < 0 {
return 0, false
}
currentIndex := 0
for i := ShardId(0); i < TotalShardsCount; i++ {
if b.HasShardId(i) {
if currentIndex == index {
return i, true
}
currentIndex++
}
}
return 0, false // index out of range
}
// Helper methods for EcVolumeInfo to manage the optimized ShardSizes slice
func (ecInfo *EcVolumeInfo) ensureShardSizesInitialized() {
expectedLength := ecInfo.ShardBits.ShardIdCount()
if ecInfo.ShardSizes == nil {
ecInfo.ShardSizes = make([]int64, expectedLength)
} else if len(ecInfo.ShardSizes) != expectedLength {
// Resize and preserve existing data
ecInfo.resizeShardSizes(ecInfo.ShardBits)
}
}
func (ecInfo *EcVolumeInfo) resizeShardSizes(prevShardBits ShardBits) {
expectedLength := ecInfo.ShardBits.ShardIdCount()
newSizes := make([]int64, expectedLength)
// Copy existing sizes to new positions based on current ShardBits
if len(ecInfo.ShardSizes) > 0 {
newIndex := 0
for shardId := ShardId(0); shardId < TotalShardsCount && newIndex < expectedLength; shardId++ {
if ecInfo.ShardBits.HasShardId(shardId) {
// Try to find the size for this shard in the old array using previous ShardBits
if oldIndex, found := prevShardBits.ShardIdToIndex(shardId); found && oldIndex < len(ecInfo.ShardSizes) {
newSizes[newIndex] = ecInfo.ShardSizes[oldIndex]
}
newIndex++
}
}
}
ecInfo.ShardSizes = newSizes
}
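For example (hypothetical sizes): shards {0, 2, 5} with ShardSizes [1000, 2000, 5000], after RemoveShardId(2), leave ShardBits {0, 5}; the loop resolves each surviving shard's old position through prevShardBits and produces [1000, 5000], with shard 2's entry dropped.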

View File

@ -17,14 +17,18 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
// convert into in memory struct storage.VolumeInfo
var shards []*erasure_coding.EcVolumeInfo
for _, shardInfo := range shardInfos {
shards = append(shards,
erasure_coding.NewEcVolumeInfo(
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits),
shardInfo.ExpireAtSec,
shardInfo.DiskId))
// Create EcVolumeInfo directly with optimized format
ecVolumeInfo := &erasure_coding.EcVolumeInfo{
VolumeId: needle.VolumeId(shardInfo.Id),
Collection: shardInfo.Collection,
ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
DiskType: shardInfo.DiskType,
DiskId: shardInfo.DiskId,
ExpireAtSec: shardInfo.ExpireAtSec,
ShardSizes: shardInfo.ShardSizes,
}
shards = append(shards, ecVolumeInfo)
}
// find out the delta volumes
newShards, deletedShards = dn.UpdateEcShards(shards)
@ -41,24 +45,32 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
// convert into in memory struct storage.VolumeInfo
var newShards, deletedShards []*erasure_coding.EcVolumeInfo
for _, shardInfo := range newEcShards {
newShards = append(newShards,
erasure_coding.NewEcVolumeInfo(
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits),
shardInfo.ExpireAtSec,
shardInfo.DiskId))
// Create EcVolumeInfo directly with optimized format
ecVolumeInfo := &erasure_coding.EcVolumeInfo{
VolumeId: needle.VolumeId(shardInfo.Id),
Collection: shardInfo.Collection,
ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
DiskType: shardInfo.DiskType,
DiskId: shardInfo.DiskId,
ExpireAtSec: shardInfo.ExpireAtSec,
ShardSizes: shardInfo.ShardSizes,
}
newShards = append(newShards, ecVolumeInfo)
}
for _, shardInfo := range deletedEcShards {
deletedShards = append(deletedShards,
erasure_coding.NewEcVolumeInfo(
shardInfo.DiskType,
shardInfo.Collection,
needle.VolumeId(shardInfo.Id),
erasure_coding.ShardBits(shardInfo.EcIndexBits),
shardInfo.ExpireAtSec,
shardInfo.DiskId))
// Create EcVolumeInfo directly with optimized format
ecVolumeInfo := &erasure_coding.EcVolumeInfo{
VolumeId: needle.VolumeId(shardInfo.Id),
Collection: shardInfo.Collection,
ShardBits: erasure_coding.ShardBits(shardInfo.EcIndexBits),
DiskType: shardInfo.DiskType,
DiskId: shardInfo.DiskId,
ExpireAtSec: shardInfo.ExpireAtSec,
ShardSizes: shardInfo.ShardSizes,
}
deletedShards = append(deletedShards, ecVolumeInfo)
}
dn.DeltaUpdateEcShards(newShards, deletedShards)
@ -69,7 +81,6 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
for _, v := range deletedShards {
t.UnRegisterEcShards(v, dn)
}
return
}
func NewEcShardLocations(collection string) *EcShardLocations {
@ -178,6 +189,4 @@ func (t *Topology) DeleteEcCollection(collection string) {
for _, vid := range vids {
delete(t.ecShardMap, vid)
}
return
}

View File

@ -168,19 +168,16 @@ func planECDestinations(activeTopology *topology.ActiveTopology, metric *types.V
}
}
// Determine minimum shard disk locations based on configuration
minTotalDisks := 4
// Get available disks for EC placement (include source node for EC)
availableDisks := activeTopology.GetAvailableDisks(topology.TaskTypeErasureCoding, "")
if len(availableDisks) < minTotalDisks {
return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", minTotalDisks, len(availableDisks))
if len(availableDisks) < erasure_coding.MinTotalDisks {
return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", erasure_coding.MinTotalDisks, len(availableDisks))
}
// Select best disks for EC placement with rack/DC diversity
selectedDisks := selectBestECDestinations(availableDisks, sourceRack, sourceDC, erasure_coding.TotalShardsCount)
if len(selectedDisks) < minTotalDisks {
return nil, fmt.Errorf("found %d disks, but could not find %d suitable destinations for EC placement", len(selectedDisks), minTotalDisks)
if len(selectedDisks) < erasure_coding.MinTotalDisks {
return nil, fmt.Errorf("found %d disks, but could not find %d suitable destinations for EC placement", len(selectedDisks), erasure_coding.MinTotalDisks)
}
var plans []*topology.DestinationPlan

View File

@ -81,9 +81,6 @@ func (t *ErasureCodingTask) Execute(ctx context.Context, params *worker_pb.TaskP
// Use the working directory from task parameters, or fall back to a default
baseWorkDir := t.workDir
if baseWorkDir == "" {
baseWorkDir = "/tmp/seaweedfs_ec_work"
}
// Create unique working directory for this task
taskWorkDir := filepath.Join(baseWorkDir, fmt.Sprintf("vol_%d_%d", t.volumeID, time.Now().Unix()))