Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-10-21 21:38:51 +08:00

Merge branch 'master' into refactoring_dat_backend
@@ -165,8 +165,9 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 	var maxFileKey NeedleId
 	collectionVolumeSize := make(map[string]uint64)
 	for _, location := range s.Locations {
+		var deleteVids []needle.VolumeId
 		maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
-		location.Lock()
+		location.RLock()
 		for _, v := range location.volumes {
 			if maxFileKey < v.MaxFileKey() {
 				maxFileKey = v.MaxFileKey()
@@ -175,8 +176,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 				volumeMessages = append(volumeMessages, v.ToVolumeInformationMessage())
 			} else {
 				if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
-					location.deleteVolumeById(v.Id)
-					glog.V(0).Infoln("volume", v.Id, "is deleted.")
+					deleteVids = append(deleteVids, v.Id)
 				} else {
 					glog.V(0).Infoln("volume", v.Id, "is expired.")
 				}
@@ -184,7 +184,17 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 			fileSize, _, _ := v.FileStat()
 			collectionVolumeSize[v.Collection] += fileSize
 		}
-		location.Unlock()
+		location.RUnlock()
+
+		if len(deleteVids) > 0 {
+			// delete expired volumes.
+			location.Lock()
+			for _, vid := range deleteVids {
+				location.deleteVolumeById(vid)
+				glog.V(0).Infoln("volume", vid, "is deleted.")
+			}
+			location.Unlock()
+		}
 	}

 	for col, size := range collectionVolumeSize {
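The change above stops removing volumes while ranging over location.volumes under the lock: the scan now runs under a read lock, expired volume IDs are collected into deleteVids, and the write lock is taken only afterwards for the actual removal. Below is a minimal sketch of that collect-then-delete pattern, using an illustrative mutex-guarded map (the names location, volumes, and collectAndExpire are stand-ins, not SeaweedFS's real DiskLocation API):

package main

import (
	"fmt"
	"sync"
)

// location stands in for a DiskLocation-like structure: an RWMutex
// guarding a map of volume IDs. Everything here is illustrative.
type location struct {
	sync.RWMutex
	volumes map[int]string
}

func (l *location) collectAndExpire(isExpired func(id int) bool) {
	var deleteVids []int

	// Scan under the read lock only; collect IDs instead of deleting in place.
	l.RLock()
	for id := range l.volumes {
		if isExpired(id) {
			deleteVids = append(deleteVids, id)
		}
	}
	l.RUnlock()

	// Take the write lock only for the short delete phase.
	if len(deleteVids) > 0 {
		l.Lock()
		for _, vid := range deleteVids {
			delete(l.volumes, vid)
			fmt.Println("volume", vid, "is deleted.")
		}
		l.Unlock()
	}
}

func main() {
	l := &location{volumes: map[int]string{1: "a", 2: "b", 3: "c"}}
	l.collectAndExpire(func(id int) bool { return id == 2 })
	fmt.Println("remaining volumes:", len(l.volumes))
}

Holding only the read lock during the scan keeps other readers of the location unblocked, and the write lock is held just long enough to drop the collected entries.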
@@ -288,7 +288,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [
 	}

 	for _, sourceDataNode := range sourceDataNodes {
-		glog.V(4).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
+		glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode)
 		n, is_deleted, err = s.doReadRemoteEcShardInterval(ctx, sourceDataNode, needleId, vid, shardId, buf, offset)
 		if err == nil {
 			return
@@ -340,7 +340,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode
 	}

 func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) {
-	glog.V(4).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)
+	glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover)

 	enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)
 	if err != nil {
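recoverOneRemoteEcShardInterval builds a Reed-Solomon codec from the configured data and parity shard counts and rebuilds the shard it could not read directly from the shards it can still fetch. Below is a rough, self-contained sketch of that reconstruction step using the github.com/klauspost/reedsolomon package (which provides the reedsolomon.New(dataShards, parityShards) constructor seen above), assuming a 10 data + 4 parity layout and feeding it synthetic shard data rather than real EC volume reads:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// Assumed layout: 10 data shards + 4 parity shards.
	const dataShards, parityShards = 10, 4

	enc, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		log.Fatal(err)
	}

	// Split a synthetic payload into 10 data shards and compute the 4 parity shards.
	shards, err := enc.Split(bytes.Repeat([]byte("ec-demo-"), 500))
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Simulate a shard that no source data node could serve.
	shards[3] = nil

	// Reconstruct rebuilds the nil shards, as long as at least
	// dataShards of the 14 slices are still intact.
	if err := enc.Reconstruct(shards); err != nil {
		log.Fatal(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println("shard 3 recovered, parity verifies:", ok, err)
}

Reconstruct only needs any dataShards of the dataShards+parityShards slices to be present; the nil entries mark which shards to rebuild.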