ec.balance: collect dc rack info
commit 9d9162ca35
parent 7912a812f1
@@ -145,25 +145,29 @@ func balanceEcVolumes(commandEnv *CommandEnv, collection string, applyBalancing
 func doBalanceEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, allEcNodes []*EcNode, applyBalancing bool) error {
 	// collect all ec nodes with at least one free slot
 	var possibleDestinationEcNodes []*EcNode
+	possibleDataCenters := make(map[string]int)
+	possibleRacks := make(map[string]int)
 	for _, ecNode := range allEcNodes {
 		if ecNode.freeEcSlot > 0 {
 			possibleDestinationEcNodes = append(possibleDestinationEcNodes, ecNode)
+			possibleDataCenters[ecNode.dc] += ecNode.freeEcSlot
+			possibleRacks[ecNode.dc+"/"+ecNode.rack] += ecNode.freeEcSlot
 		}
 	}
 	// calculate average number of shards an ec node should have for one volume
 	averageShardsPerEcNode := int(math.Ceil(float64(erasure_coding.TotalShardsCount) / float64(len(possibleDestinationEcNodes))))
-	fmt.Printf("vid %d averageShardsPerEcNode %+v\n", vid, averageShardsPerEcNode)
+	fmt.Printf("vid %d averageShardsPerEcNode: %d\n", vid, averageShardsPerEcNode)
 	// check whether this volume has ecNodes that are over average
-	isOverLimit := false
+	isOverPerNodeAverage := false
 	for _, ecNode := range locations {
 		shardBits := findEcVolumeShards(ecNode, vid)
 		if shardBits.ShardIdCount() > averageShardsPerEcNode {
-			isOverLimit = true
-			fmt.Printf("vid %d %s has %d shards, isOverLimit %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverLimit)
+			isOverPerNodeAverage = true
+			fmt.Printf("vid %d %s has %d shards, isOverPerNodeAverage %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverPerNodeAverage)
 			break
 		}
 	}
-	if isOverLimit {
+	if isOverPerNodeAverage {
 		if err := spreadShardsIntoMoreDataNodes(ctx, commandEnv, averageShardsPerEcNode, collection, vid, locations, possibleDestinationEcNodes, applyBalancing); err != nil {
 			return err
 		}
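To make the spread decision concrete: with SeaweedFS's default 10 data + 4 parity layout a volume has 14 shards in total, so the per-node ceiling is ceil(14 / number of candidate nodes). Below is a minimal self-contained sketch of the tallies this hunk introduces, using hypothetical nodes (ecNode here is a stand-in for the EcNode struct later in the diff):

package main

import (
	"fmt"
	"math"
)

// ecNode is a stand-in for the EcNode struct in the diff, reduced to
// the fields the tally needs.
type ecNode struct {
	dc, rack   string
	freeEcSlot int
}

const totalShardsCount = 14 // assuming the 10 data + 4 parity shard layout

func main() {
	nodes := []ecNode{
		{"dc1", "rack1", 7},
		{"dc1", "rack2", 3},
		{"dc2", "rack1", 5},
	}
	possibleDataCenters := make(map[string]int)
	possibleRacks := make(map[string]int)
	candidates := 0
	for _, n := range nodes {
		if n.freeEcSlot > 0 {
			candidates++
			possibleDataCenters[n.dc] += n.freeEcSlot
			// rack ids are only unique within a data center, hence the composite key
			possibleRacks[n.dc+"/"+n.rack] += n.freeEcSlot
		}
	}
	averageShardsPerEcNode := int(math.Ceil(float64(totalShardsCount) / float64(candidates)))
	fmt.Println(possibleDataCenters)    // map[dc1:10 dc2:5]
	fmt.Println(possibleRacks)          // map[dc1/rack1:7 dc1/rack2:3 dc2/rack1:5]
	fmt.Println(averageShardsPerEcNode) // ceil(14/3) = 5
}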
@@ -275,11 +279,11 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits
 	return 0
 }
 
-func addEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32){
+func addEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32) {
 
 	for _, shardInfo := range ecNode.info.EcShardInfos {
 		if needle.VolumeId(shardInfo.Id) == vid {
-			for _, shardId := range shardIds{
+			for _, shardId := range shardIds {
 				shardInfo.EcIndexBits = uint32(erasure_coding.ShardBits(shardInfo.EcIndexBits).AddShardId(erasure_coding.ShardId(shardId)))
 			}
 		}
@@ -287,11 +291,11 @@ func addEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32){
 
 }
 
-func deleteEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32){
+func deleteEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32) {
 
 	for _, shardInfo := range ecNode.info.EcShardInfos {
 		if needle.VolumeId(shardInfo.Id) == vid {
-			for _, shardId := range shardIds{
+			for _, shardId := range shardIds {
 				shardInfo.EcIndexBits = uint32(erasure_coding.ShardBits(shardInfo.EcIndexBits).RemoveShardId(erasure_coding.ShardId(shardId)))
 			}
 		}
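addEcVolumeShards and deleteEcVolumeShards are mirror images over the same representation: EcIndexBits is a bitmask with one bit per shard id, and AddShardId/RemoveShardId set or clear that bit. A minimal sketch of those semantics, where shardBits is a stand-in for erasure_coding.ShardBits:

package main

import "fmt"

// shardBits stands in for erasure_coding.ShardBits: bit i is set when
// the node holds shard i of the volume.
type shardBits uint32

func (b shardBits) addShardId(id uint32) shardBits    { return b | (1 << id) }
func (b shardBits) removeShardId(id uint32) shardBits { return b &^ (1 << id) }

func main() {
	var bits shardBits
	bits = bits.addShardId(0).addShardId(3) // shards 0 and 3 present
	fmt.Printf("%04b\n", bits)              // 1001
	bits = bits.removeShardId(0)
	fmt.Printf("%04b\n", bits) // 1000
}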
@@ -98,11 +98,11 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption
 	return
 }
 
-func eachDataNode(topo *master_pb.TopologyInfo, fn func(*master_pb.DataNodeInfo)) {
+func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc, rack string, dn *master_pb.DataNodeInfo)) {
 	for _, dc := range topo.DataCenterInfos {
 		for _, rack := range dc.RackInfos {
 			for _, dn := range rack.DataNodeInfos {
-				fn(dn)
+				fn(dc.Id, rack.Id, dn)
 			}
 		}
 	}
 }
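The visitor now reports where each node lives, so callers no longer need a second topology walk to recover location. A self-contained sketch of the new shape, with stand-in structs for the master_pb topology types:

package main

import "fmt"

// Stand-ins for master_pb.TopologyInfo and friends, reduced to the
// fields the traversal touches.
type dataNodeInfo struct{ Id string }

type rackInfo struct {
	Id            string
	DataNodeInfos []*dataNodeInfo
}

type dataCenterInfo struct {
	Id        string
	RackInfos []*rackInfo
}

type topologyInfo struct{ DataCenterInfos []*dataCenterInfo }

// eachDataNode hands the callback the dc and rack ids alongside each node.
func eachDataNode(topo *topologyInfo, fn func(dc, rack string, dn *dataNodeInfo)) {
	for _, dc := range topo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for _, dn := range rack.DataNodeInfos {
				fn(dc.Id, rack.Id, dn)
			}
		}
	}
}

func main() {
	topo := &topologyInfo{DataCenterInfos: []*dataCenterInfo{{
		Id: "dc1",
		RackInfos: []*rackInfo{{
			Id:            "rack1",
			DataNodeInfos: []*dataNodeInfo{{Id: "127.0.0.1:8080"}},
		}},
	}}}
	eachDataNode(topo, func(dc, rack string, dn *dataNodeInfo) {
		fmt.Println(dc, rack, dn.Id) // prints: dc1 rack1 127.0.0.1:8080
	})
}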
@@ -128,6 +128,8 @@ func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) {
 
 type EcNode struct {
 	info       *master_pb.DataNodeInfo
+	dc         string
+	rack       string
 	freeEcSlot int
 }
 
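Carrying dc and rack on EcNode at collection time is what lets doBalanceEcShards above build its possibleDataCenters and possibleRacks tallies from the node list alone, without re-walking the topology.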
@@ -144,10 +146,12 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv) (ecNodes []*EcNode
 	}
 
 	// find out all volume servers with one slot left.
-	eachDataNode(resp.TopologyInfo, func(dn *master_pb.DataNodeInfo) {
+	eachDataNode(resp.TopologyInfo, func(dc, rack string, dn *master_pb.DataNodeInfo) {
 		if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 {
 			ecNodes = append(ecNodes, &EcNode{
 				info:       dn,
+				dc:         dc,
+				rack:       rack,
 				freeEcSlot: int(freeEcSlots),
 			})
 			totalFreeEcSlots += freeEcSlots