mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2025-08-24 21:29:04 +08:00)

commit 36f95e50a9
parent 43101ccea0

    avoid possible nil disk info
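This commit guards every lookup of the hard-drive entry in a node's DiskInfos map with Go's comma-ok form. Reading a missing key from a map[string]*master_pb.DiskInfo returns a nil pointer, so the loops over diskInfo.EcShardInfos below would panic with a nil pointer dereference on any node that reports no hard-drive disk. The following is a minimal standalone sketch of the pattern the diff applies; the DiskInfo type here is a stand-in for the protobuf message, not the real definition.

    package main

    import "fmt"

    // DiskInfo stands in for the master_pb.DiskInfo message referenced in the diff.
    type DiskInfo struct {
        EcShardInfos []int
    }

    func main() {
        diskInfos := map[string]*DiskInfo{} // node reports no "hdd" entry

        // Unchecked lookup: a missing key yields a nil *DiskInfo, and reading
        // diskInfo.EcShardInfos from it panics at runtime.
        //   diskInfo := diskInfos["hdd"]
        //   _ = diskInfo.EcShardInfos // panic: nil pointer dereference

        // Guarded lookup, as the commit now does everywhere:
        if diskInfo, found := diskInfos["hdd"]; found {
            fmt.Println("ec shards on this disk:", len(diskInfo.EcShardInfos))
        } else {
            fmt.Println("no hard-drive disk info on this node; skipping")
        }
    }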
@@ -388,7 +388,10 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
     }

     ecNodeIdToShardCount := groupByCount(rackEcNodes, func(ecNode *EcNode) (id string, count int) {
-        diskInfo := ecNode.info.DiskInfos[string(types.HardDriveType)]
+        diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+        if !found {
+            return
+        }
         for _, ecShardInfo := range diskInfo.EcShardInfos {
             count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount()
         }
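In the groupByCount callback above the result parameters are named, so the bare return taken when the disk entry is missing simply yields the zero values ("", 0) for that node instead of panicking. A small illustrative sketch of that Go behavior (the helper name and values here are made up for the example):

    package main

    import "fmt"

    // Named results mean a bare "return" after the !found check yields ("", 0),
    // mirroring the shape of the groupByCount callback in the hunk above.
    func shardCountOrZero(found bool) (id string, count int) {
        if !found {
            return // same as: return "", 0
        }
        return "node-a", 3 // illustrative values only
    }

    func main() {
        id, count := shardCountOrZero(false)
        fmt.Printf("%q %d\n", id, count) // prints: "" 0
    }

The collectVolumeIdToEcNodes hunk later in this diff handles the same situation with continue, skipping such nodes outright.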
@@ -413,11 +416,12 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
         if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount {

             emptyNodeIds := make(map[uint32]bool)
-            emptyDiskInfo := emptyNode.info.DiskInfos[string(types.HardDriveType)]
+            if emptyDiskInfo, found := emptyNode.info.DiskInfos[string(types.HardDriveType)]; found {
                 for _, shards := range emptyDiskInfo.EcShardInfos {
                     emptyNodeIds[shards.Id] = true
                 }
-            fullDiskInfo := fullNode.info.DiskInfos[string(types.HardDriveType)]
+            }
+            if fullDiskInfo, found := fullNode.info.DiskInfos[string(types.HardDriveType)]; found {
                 for _, shards := range fullDiskInfo.EcShardInfos {
                     if _, found := emptyNodeIds[shards.Id]; !found {
                         for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
@@ -439,6 +443,7 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
                 }
             }
         }
+        }

     return nil
 }
@@ -515,7 +520,10 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
 func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode {
     vidLocations := make(map[needle.VolumeId][]*EcNode)
     for _, ecNode := range allEcNodes {
-        diskInfo := ecNode.info.DiskInfos[string(types.HardDriveType)]
+        diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+        if !found {
+            continue
+        }
         for _, shardInfo := range diskInfo.EcShardInfos {
             vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode)
         }
@@ -288,12 +288,13 @@ func ceilDivide(total, n int) int {

 func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {

-    diskInfo := ecNode.info.DiskInfos[string(types.HardDriveType)]
+    if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
         for _, shardInfo := range diskInfo.EcShardInfos {
             if needle.VolumeId(shardInfo.Id) == vid {
                 return erasure_coding.ShardBits(shardInfo.EcIndexBits)
             }
         }
+    }

     return 0
 }
@@ -301,7 +302,7 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.Shar
 func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {

     foundVolume := false
-    diskInfo := ecNode.info.DiskInfos[string(types.HardDriveType)]
+    if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
         for _, shardInfo := range diskInfo.EcShardInfos {
             if needle.VolumeId(shardInfo.Id) == vid {
                 oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
@@ -315,6 +316,7 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,
                 break
             }
         }
+    }

     if !foundVolume {
         var newShardBits erasure_coding.ShardBits
@@ -335,7 +337,7 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,

 func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {

-    diskInfo := ecNode.info.DiskInfos[string(types.HardDriveType)]
+    if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
         for _, shardInfo := range diskInfo.EcShardInfos {
             if needle.VolumeId(shardInfo.Id) == vid {
                 oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
@@ -347,6 +349,7 @@ func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint3
             ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
         }
     }
+    }

     return ecNode
 }
@@ -226,12 +226,13 @@ func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyIn
 func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {

     eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-        diskInfo := dn.DiskInfos[string(types.HardDriveType)]
+        if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
             for _, v := range diskInfo.EcShardInfos {
                 if v.Collection == selectedCollection && v.Id == uint32(vid) {
                     ecShardInfos = append(ecShardInfos, v)
                 }
             }
+        }
     })

     return
@@ -241,12 +242,13 @@ func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection stri

     vidMap := make(map[uint32]bool)
     eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-        diskInfo := dn.DiskInfos[string(types.HardDriveType)]
+        if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
             for _, v := range diskInfo.EcShardInfos {
                 if v.Collection == selectedCollection {
                     vidMap[v.Id] = true
                 }
             }
+        }
     })

     for vid := range vidMap {
@@ -260,12 +262,13 @@ func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeI

     nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits)
     eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-        diskInfo := dn.DiskInfos[string(types.HardDriveType)]
+        if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
             for _, v := range diskInfo.EcShardInfos {
                 if v.Id == uint32(vid) {
                     nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
                 }
             }
+        }
     })

     return nodeToEcIndexBits
@@ -5,6 +5,7 @@ import (
     "flag"
     "fmt"
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
+    "github.com/chrislusf/seaweedfs/weed/storage/types"
     "io"
     "path/filepath"
     "sort"
@@ -167,7 +168,8 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
         keepDataNodesSorted(allLocations, replica.info.DiskType)
         for _, dst := range allLocations {
             // check whether data nodes satisfy the constraints
-            if dst.dataNode.DiskInfos[replica.info.DiskType].FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
+            fn := capacityByFreeVolumeCount(types.ToDiskType(replica.info.DiskType))
+            if fn(dst.dataNode) > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
                 // check collection name pattern
                 if *c.collectionPattern != "" {
                     matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
@@ -218,8 +220,9 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
 }

 func keepDataNodesSorted(dataNodes []location, diskType string) {
+    fn := capacityByFreeVolumeCount(types.ToDiskType(diskType))
     sort.Slice(dataNodes, func(i, j int) bool {
-        return dataNodes[i].dataNode.DiskInfos[diskType].FreeVolumeCount > dataNodes[j].dataNode.DiskInfos[diskType].FreeVolumeCount
+        return fn(dataNodes[i].dataNode) > fn(dataNodes[j].dataNode)
     })
 }
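The last three hunks (the new "github.com/chrislusf/seaweedfs/weed/storage/types" import and the fixUnderReplicatedVolumes and keepDataNodesSorted changes) replace direct reads of dataNode.DiskInfos[diskType].FreeVolumeCount with a capacity function obtained from capacityByFreeVolumeCount(types.ToDiskType(diskType)). The direct read has the same failure mode as the EC code above: if a data node has no entry for that disk type, the map lookup returns nil and the FreeVolumeCount field access panics. The helper's body is not part of this diff; the sketch below only illustrates the kind of nil-safe accessor such a call implies, with hypothetical stand-in types and a made-up function name.

    package main

    import "fmt"

    // Hypothetical stand-ins; the real master_pb types and the real
    // capacityByFreeVolumeCount helper are not shown in this diff.
    type diskInfo struct{ FreeVolumeCount int64 }
    type dataNodeInfo struct{ DiskInfos map[string]*diskInfo }

    // capacityByFreeVolumes returns an accessor that treats a missing disk-type
    // entry as zero free volumes instead of dereferencing a nil pointer.
    func capacityByFreeVolumes(diskType string) func(*dataNodeInfo) int64 {
        return func(dn *dataNodeInfo) int64 {
            if di, found := dn.DiskInfos[diskType]; found {
                return di.FreeVolumeCount
            }
            return 0
        }
    }

    func main() {
        fn := capacityByFreeVolumes("hdd")
        withDisk := &dataNodeInfo{DiskInfos: map[string]*diskInfo{"hdd": {FreeVolumeCount: 7}}}
        withoutDisk := &dataNodeInfo{DiskInfos: map[string]*diskInfo{}}
        fmt.Println(fn(withDisk), fn(withoutDisk)) // 7 0
    }

With an accessor of this shape, both the "> 0" capacity check in fixUnderReplicatedVolumes and the sort comparison in keepDataNodesSorted stay nil-safe, which is why the types import is added alongside these two hunks.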
|