volume: add option to limit compaction speed

Chris Lu
2019-05-03 17:22:39 -07:00
parent f0f981e7c8
commit b335f81a4f
9 changed files with 69 additions and 33 deletions
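
Only one of the nine changed files is shown below. The visible change threads a new compactionBytePerSecond parameter from Compact() down into the vacuum scanner: VolumeFileScanner4Vacuum counts the bytes it copies and, roughly every 100ms, sleeps just long enough to keep the copy rate at or under the limit. A value of 0 disables throttling and preserves the old behavior.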

@@ -18,7 +18,7 @@ func (v *Volume) garbageLevel() float64 {
 	return float64(v.nm.DeletedSize()) / float64(v.ContentSize())
 }
 
-func (v *Volume) Compact(preallocate int64) error {
+func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error {
 	glog.V(3).Infof("Compacting volume %d ...", v.Id)
 	//no need to lock for copy on write
 	//v.accessLock.Lock()
@@ -29,7 +29,7 @@ func (v *Volume) Compact(preallocate int64) error {
 	v.lastCompactIndexOffset = v.nm.IndexFileSize()
 	v.lastCompactRevision = v.SuperBlock.CompactionRevision
 	glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
-	return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate)
+	return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
 }
 
 func (v *Volume) Compact2() error {
@@ -236,12 +236,15 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
 }
 
 type VolumeFileScanner4Vacuum struct {
-	version   needle.Version
-	v         *Volume
-	dst       *os.File
-	nm        *NeedleMap
-	newOffset int64
-	now       uint64
+	version                 needle.Version
+	v                       *Volume
+	dst                     *os.File
+	nm                      *NeedleMap
+	newOffset               int64
+	now                     uint64
+	compactionBytePerSecond int64
+	lastSizeCounter         int64
+	lastSizeCheckTime       time.Time
 }
 
 func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock SuperBlock) error {
@@ -269,13 +272,32 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
 		if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil {
 			return fmt.Errorf("cannot append needle: %s", err)
 		}
-		scanner.newOffset += n.DiskSize(scanner.version)
+		delta := n.DiskSize(scanner.version)
+		scanner.newOffset += delta
+		scanner.maybeSlowdown(delta)
 		glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
 	}
 	return nil
 }
 
-func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64) (err error) {
+func (scanner *VolumeFileScanner4Vacuum) maybeSlowdown(delta int64) {
+	if scanner.compactionBytePerSecond > 0 {
+		scanner.lastSizeCounter += delta
+		now := time.Now()
+		elapsedDuration := now.Sub(scanner.lastSizeCheckTime)
+		if elapsedDuration > 100*time.Millisecond {
+			overLimitBytes := scanner.lastSizeCounter - scanner.compactionBytePerSecond/10
+			if overLimitBytes > 0 {
+				overRatio := float64(overLimitBytes) / float64(scanner.compactionBytePerSecond)
+				sleepTime := time.Duration(overRatio*1000) * time.Millisecond
+				// glog.V(0).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", scanner.lastSizeCounter, scanner.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
+				time.Sleep(sleepTime)
+			}
+			scanner.lastSizeCounter, scanner.lastSizeCheckTime = 0, time.Now()
+		}
+	}
+}
+
+func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64, compactionBytePerSecond int64) (err error) {
 	var (
 		dst, idx *os.File
 	)
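
The throttle math: each elapsed ~100ms window has a budget of compactionBytePerSecond/10 bytes, and any overage is paid back with a proportional sleep of overLimitBytes/compactionBytePerSecond seconds. At a 10 MB/s limit, for example, the window budget is 1 MB; copying 1.5 MB in a window overshoots by 0.5 MB, overRatio is 0.05, and the scanner sleeps 50ms, exactly what the excess bytes cost at the target rate. A minimal self-contained sketch of the same idea (byteThrottler and Throttle are hypothetical names, not from this commit):

package main

import (
	"fmt"
	"time"
)

// byteThrottler mirrors the maybeSlowdown logic above: accumulate a
// byte count and, once ~100ms have elapsed, sleep off any overage.
type byteThrottler struct {
	bytesPerSecond int64 // 0 disables throttling
	counter        int64
	lastCheck      time.Time
}

func (t *byteThrottler) Throttle(delta int64) {
	if t.bytesPerSecond <= 0 {
		return
	}
	t.counter += delta
	if time.Since(t.lastCheck) > 100*time.Millisecond {
		// budget for one 100ms window is a tenth of the per-second limit
		over := t.counter - t.bytesPerSecond/10
		if over > 0 {
			// sleep for the time the excess bytes cost at the target rate
			overRatio := float64(over) / float64(t.bytesPerSecond)
			time.Sleep(time.Duration(overRatio*1000) * time.Millisecond)
		}
		t.counter, t.lastCheck = 0, time.Now()
	}
}

func main() {
	t := &byteThrottler{bytesPerSecond: 10 << 20, lastCheck: time.Now()} // 10 MB/s
	start := time.Now()
	for i := 0; i < 30; i++ {
		time.Sleep(10 * time.Millisecond) // simulate a slow disk write
		t.Throttle(1 << 20)               // 1 MB copied
	}
	fmt.Printf("copied 30 MB in %v\n", time.Since(start)) // ~3s at the 10 MB/s limit
}

Note one consequence of the window design: if an entire copy finishes inside a single 100ms window, no sleep ever triggers. For compaction, which issues many disk writes over many windows, the average rate still converges on the limit.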
@@ -290,10 +312,12 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
 	defer idx.Close()
 
 	scanner := &VolumeFileScanner4Vacuum{
-		v:   v,
-		now: uint64(time.Now().Unix()),
-		nm:  NewBtreeNeedleMap(idx),
-		dst: dst,
+		v:                       v,
+		now:                     uint64(time.Now().Unix()),
+		nm:                      NewBtreeNeedleMap(idx),
+		dst:                     dst,
+		compactionBytePerSecond: compactionBytePerSecond,
+		lastSizeCheckTime:       time.Now(),
 	}
 	err = ScanVolumeFile(v.dir, v.Collection, v.Id, v.needleMapKind, scanner)
 	return
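
The other eight changed files (not shown in this view) plumb the limit from configuration down to these calls. A hypothetical way to surface the knob, converting a human-friendly MB/s setting into the bytes-per-second value Compact() now takes (the flag name is illustrative, not taken from this commit):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Hypothetical flag; the commit's real option name is defined in
	// one of the changed files not shown in this view.
	compactionMbps := flag.Int64("compactionMbps", 0, "limit compaction speed in MB/s (0 = unthrottled)")
	flag.Parse()

	// Compact's new second argument is in bytes per second.
	compactionBytePerSecond := *compactionMbps * 1024 * 1024
	fmt.Printf("would call v.Compact(preallocate, %d)\n", compactionBytePerSecond)
}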